1 // SPDX-License-Identifier: GPL-2.0-only
3 * Xilinx Axi Ethernet device driver
5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
9 * Copyright (c) 2010 - 2011 PetaLogix
10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
17 * - Add Axi Fifo support.
18 * - Factor out Axi DMA code into separate driver.
19 * - Test and fix basic multicast filtering.
20 * - Add support for extended multicast filtering.
21 * - Test basic VLAN support.
22 * - Add support for extended VLAN support.
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/etherdevice.h>
28 #include <linux/module.h>
29 #include <linux/netdevice.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_address.h>
35 #include <linux/platform_device.h>
36 #include <linux/skbuff.h>
37 #include <linux/math64.h>
38 #include <linux/phy.h>
39 #include <linux/mii.h>
40 #include <linux/ethtool.h>
41 #include <linux/dmaengine.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma/xilinx_dma.h>
44 #include <linux/circ_buf.h>
45 #include <net/netdev_queues.h>
47 #include "xilinx_axienet.h"
49 /* Descriptor defines for Tx and Rx DMA */
50 #define TX_BD_NUM_DEFAULT 128
51 #define RX_BD_NUM_DEFAULT 1024
52 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
53 #define TX_BD_NUM_MAX 4096
54 #define RX_BD_NUM_MAX 4096
55 #define DMA_NUM_APP_WORDS 5
57 #define RX_BUF_NUM_DEFAULT 128
59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60 #define DRIVER_NAME "xaxienet"
61 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
62 #define DRIVER_VERSION "1.00a"
64 #define AXIENET_REGS_N 40
66 static void axienet_rx_submit_desc(struct net_device *ndev);
68 /* Match table for of_platform binding */
69 static const struct of_device_id axienet_of_match[] = {
70 { .compatible = "xlnx,axi-ethernet-1.00.a", },
71 { .compatible = "xlnx,axi-ethernet-1.01.a", },
72 { .compatible = "xlnx,axi-ethernet-2.01.a", },
76 MODULE_DEVICE_TABLE(of, axienet_of_match);
78 /* Option table for setting up Axi Ethernet hardware options */
79 static struct axienet_option axienet_options[] = {
80 /* Turn on jumbo packet support for both Rx and Tx */
82 .opt = XAE_OPTION_JUMBO,
84 .m_or = XAE_TC_JUM_MASK,
86 .opt = XAE_OPTION_JUMBO,
87 .reg = XAE_RCW1_OFFSET,
88 .m_or = XAE_RCW1_JUM_MASK,
89 }, { /* Turn on VLAN packet support for both Rx and Tx */
90 .opt = XAE_OPTION_VLAN,
92 .m_or = XAE_TC_VLAN_MASK,
94 .opt = XAE_OPTION_VLAN,
95 .reg = XAE_RCW1_OFFSET,
96 .m_or = XAE_RCW1_VLAN_MASK,
97 }, { /* Turn on FCS stripping on receive packets */
98 .opt = XAE_OPTION_FCS_STRIP,
99 .reg = XAE_RCW1_OFFSET,
100 .m_or = XAE_RCW1_FCS_MASK,
101 }, { /* Turn on FCS insertion on transmit packets */
102 .opt = XAE_OPTION_FCS_INSERT,
103 .reg = XAE_TC_OFFSET,
104 .m_or = XAE_TC_FCS_MASK,
105 }, { /* Turn off length/type field checking on receive packets */
106 .opt = XAE_OPTION_LENTYPE_ERR,
107 .reg = XAE_RCW1_OFFSET,
108 .m_or = XAE_RCW1_LT_DIS_MASK,
109 }, { /* Turn on Rx flow control */
110 .opt = XAE_OPTION_FLOW_CONTROL,
111 .reg = XAE_FCC_OFFSET,
112 .m_or = XAE_FCC_FCRX_MASK,
113 }, { /* Turn on Tx flow control */
114 .opt = XAE_OPTION_FLOW_CONTROL,
115 .reg = XAE_FCC_OFFSET,
116 .m_or = XAE_FCC_FCTX_MASK,
117 }, { /* Turn on promiscuous frame filtering */
118 .opt = XAE_OPTION_PROMISC,
119 .reg = XAE_FMI_OFFSET,
120 .m_or = XAE_FMI_PM_MASK,
121 }, { /* Enable transmitter */
122 .opt = XAE_OPTION_TXEN,
123 .reg = XAE_TC_OFFSET,
124 .m_or = XAE_TC_TX_MASK,
125 }, { /* Enable receiver */
126 .opt = XAE_OPTION_RXEN,
127 .reg = XAE_RCW1_OFFSET,
128 .m_or = XAE_RCW1_RX_MASK,
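/* Ring-index helpers for the dmaengine path: the free-running head/tail
 * indices are masked into the skb bookkeeping rings, which relies on
 * RX_BUF_NUM_DEFAULT and TX_BD_NUM_MAX being powers of two.
 */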
133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
144 * axienet_dma_in32 - Memory mapped Axi DMA register read
145 * @lp: Pointer to axienet local structure
146 * @reg: Address offset from the base address of the Axi DMA core
148 * Return: The contents of the Axi DMA register
150 * This function returns the contents of the corresponding Axi DMA register.
152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
154 return ioread32(lp->dma_regs + reg);
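/* Program a buffer address into a descriptor; the MSB word is only
 * written when the DMA is configured for 64-bit addressing.
 */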
157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
158 struct axidma_bd *desc)
160 desc->phys = lower_32_bits(addr);
161 if (lp->features & XAE_FEATURE_DMA_64BIT)
162 desc->phys_msb = upper_32_bits(addr);
165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
166 struct axidma_bd *desc)
168 dma_addr_t ret = desc->phys;
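/* Combine the two 32-bit halves; the shift is done in two 16-bit steps so
 * the build stays warning-free when dma_addr_t is only 32 bits wide.
 */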
170 if (lp->features & XAE_FEATURE_DMA_64BIT)
171 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
177 * axienet_dma_bd_release - Release buffer descriptor rings
178 * @ndev: Pointer to the net_device structure
180 * This function is used to release the descriptors allocated in
181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
182 * driver stop API is called.
184 static void axienet_dma_bd_release(struct net_device *ndev)
187 struct axienet_local *lp = netdev_priv(ndev);
189 /* If we end up here, tx_bd_v must have been DMA allocated. */
190 dma_free_coherent(lp->dev,
191 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
198 for (i = 0; i < lp->rx_bd_num; i++) {
201 /* A NULL skb means this descriptor has not been initialised
204 if (!lp->rx_bd_v[i].skb)
207 dev_kfree_skb(lp->rx_bd_v[i].skb);
209 /* For each descriptor, we programmed cntrl with the (non-zero)
210 * descriptor size, after it had been successfully allocated.
211 * So a non-zero value in there means we need to unmap it.
213 if (lp->rx_bd_v[i].cntrl) {
214 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
215 dma_unmap_single(lp->dev, phys,
216 lp->max_frm_size, DMA_FROM_DEVICE);
220 dma_free_coherent(lp->dev,
221 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
227 * axienet_usec_to_timer - Calculate IRQ delay timer value
228 * @lp: Pointer to the axienet_local structure
229 * @coalesce_usec: Microseconds to convert into timer value
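 *
 * Return: Value for the DMA delay-timer field; one timer tick spans 125
 * periods of the SG clock, so at a 125 MHz clock 1 usec maps to one tick.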
231 static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
234 u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
237 clk_rate = clk_get_rate(lp->axi_clk);
239 /* 1 Timeout Interval = 125 * (clock period of SG clock) */
240 result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
249 * axienet_dma_start - Set up DMA registers and start DMA operation
250 * @lp: Pointer to the axienet_local structure
252 static void axienet_dma_start(struct axienet_local *lp)
254 /* Start updating the Rx channel control register */
255 lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
256 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
257 /* Only set interrupt delay timer if not generating an interrupt on
258 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
260 if (lp->coalesce_count_rx > 1)
261 lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
262 << XAXIDMA_DELAY_SHIFT) |
263 XAXIDMA_IRQ_DELAY_MASK;
264 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
266 /* Start updating the Tx channel control register */
267 lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
268 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
269 /* Only set interrupt delay timer if not generating an interrupt on
270 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
272 if (lp->coalesce_count_tx > 1)
273 lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
274 << XAXIDMA_DELAY_SHIFT) |
275 XAXIDMA_IRQ_DELAY_MASK;
276 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
278 /* Populate the tail pointer and bring the Rx Axi DMA engine out of
279 * halted state. This will make the Rx side ready for reception.
281 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
282 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
283 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
284 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
285 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
287 /* Write to the RS (Run-stop) bit in the Tx channel control register.
288 * Tx channel is now ready to run, but it will only start transmitting
289 * once we write to the tail pointer register.
291 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
292 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
293 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
297 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
298 * @ndev: Pointer to the net_device structure
300 * Return: 0 on success; -ENOMEM on failure.
302 * This function is called to initialize the Rx and Tx DMA descriptor
303 * rings. This initializes the descriptors with required default values
304 * and is called when Axi Ethernet driver reset is called.
306 static int axienet_dma_bd_init(struct net_device *ndev)
310 struct axienet_local *lp = netdev_priv(ndev);
312 /* Reset the indexes which are used for accessing the BDs */
317 /* Allocate the Tx and Rx buffer descriptors. */
318 lp->tx_bd_v = dma_alloc_coherent(lp->dev,
319 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
320 &lp->tx_bd_p, GFP_KERNEL);
324 lp->rx_bd_v = dma_alloc_coherent(lp->dev,
325 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
326 &lp->rx_bd_p, GFP_KERNEL);
330 for (i = 0; i < lp->tx_bd_num; i++) {
331 dma_addr_t addr = lp->tx_bd_p +
332 sizeof(*lp->tx_bd_v) *
333 ((i + 1) % lp->tx_bd_num);
335 lp->tx_bd_v[i].next = lower_32_bits(addr);
336 if (lp->features & XAE_FEATURE_DMA_64BIT)
337 lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
340 for (i = 0; i < lp->rx_bd_num; i++) {
343 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
344 ((i + 1) % lp->rx_bd_num);
345 lp->rx_bd_v[i].next = lower_32_bits(addr);
346 if (lp->features & XAE_FEATURE_DMA_64BIT)
347 lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
349 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
353 lp->rx_bd_v[i].skb = skb;
354 addr = dma_map_single(lp->dev, skb->data,
355 lp->max_frm_size, DMA_FROM_DEVICE);
356 if (dma_mapping_error(lp->dev, addr)) {
357 netdev_err(ndev, "DMA mapping error\n");
360 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
362 lp->rx_bd_v[i].cntrl = lp->max_frm_size;
365 axienet_dma_start(lp);
369 axienet_dma_bd_release(ndev);
374 * axienet_set_mac_address - Write the MAC address
375 * @ndev: Pointer to the net_device structure
376 * @address: 6 byte Address to be written as MAC address
378 * This function is called to initialize the MAC address of the Axi Ethernet
379 * core. It writes to the UAW0 and UAW1 registers of the core.
381 static void axienet_set_mac_address(struct net_device *ndev,
384 struct axienet_local *lp = netdev_priv(ndev);
387 eth_hw_addr_set(ndev, address);
388 if (!is_valid_ether_addr(ndev->dev_addr))
389 eth_hw_addr_random(ndev);
391 /* Set up the unicast MAC address filter with the given MAC address */
392 axienet_iow(lp, XAE_UAW0_OFFSET,
393 (ndev->dev_addr[0]) |
394 (ndev->dev_addr[1] << 8) |
395 (ndev->dev_addr[2] << 16) |
396 (ndev->dev_addr[3] << 24));
397 axienet_iow(lp, XAE_UAW1_OFFSET,
398 (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
399 ~XAE_UAW1_UNICASTADDR_MASK) |
401 (ndev->dev_addr[5] << 8))));
405 * netdev_set_mac_address - Write the MAC address (from outside the driver)
406 * @ndev: Pointer to the net_device structure
407 * @p: 6 byte Address to be written as MAC address
409 * Return: 0 for all conditions. Presently, there is no failure case.
411 * This function is called to initialize the MAC address of the Axi Ethernet
412 * core. It calls the core specific axienet_set_mac_address. This is the
413 * function that goes into net_device_ops structure entry ndo_set_mac_address.
415 static int netdev_set_mac_address(struct net_device *ndev, void *p)
417 struct sockaddr *addr = p;
419 axienet_set_mac_address(ndev, addr->sa_data);
424 * axienet_set_multicast_list - Prepare the multicast table
425 * @ndev: Pointer to the net_device structure
427 * This function is called to initialize the multicast table during
428 * initialization. The Axi Ethernet basic multicast support has a four-entry
429 * multicast table which is initialized here. Additionally this function
430 * goes into the net_device_ops structure entry ndo_set_rx_mode. This
431 * means this function gets called whenever the multicast table entries
432 * need to be updated.
434 static void axienet_set_multicast_list(struct net_device *ndev)
437 u32 reg, af0reg, af1reg;
438 struct axienet_local *lp = netdev_priv(ndev);
440 reg = axienet_ior(lp, XAE_FMI_OFFSET);
441 reg &= ~XAE_FMI_PM_MASK;
442 if (ndev->flags & IFF_PROMISC)
443 reg |= XAE_FMI_PM_MASK;
445 reg &= ~XAE_FMI_PM_MASK;
446 axienet_iow(lp, XAE_FMI_OFFSET, reg);
448 if (ndev->flags & IFF_ALLMULTI ||
449 netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
451 axienet_iow(lp, XAE_FMI_OFFSET, reg);
452 axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
453 axienet_iow(lp, XAE_AF1_OFFSET, 0);
454 axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
455 axienet_iow(lp, XAE_AM1_OFFSET, 0);
456 axienet_iow(lp, XAE_FFE_OFFSET, 1);
458 } else if (!netdev_mc_empty(ndev)) {
459 struct netdev_hw_addr *ha;
461 netdev_for_each_mc_addr(ha, ndev) {
462 if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
465 af0reg = (ha->addr[0]);
466 af0reg |= (ha->addr[1] << 8);
467 af0reg |= (ha->addr[2] << 16);
468 af0reg |= (ha->addr[3] << 24);
470 af1reg = (ha->addr[4]);
471 af1reg |= (ha->addr[5] << 8);
476 axienet_iow(lp, XAE_FMI_OFFSET, reg);
477 axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
478 axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
479 axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
480 axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
481 axienet_iow(lp, XAE_FFE_OFFSET, 1);
486 for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
489 axienet_iow(lp, XAE_FMI_OFFSET, reg);
490 axienet_iow(lp, XAE_FFE_OFFSET, 0);
495 * axienet_setoptions - Set an Axi Ethernet option
496 * @ndev: Pointer to the net_device structure
497 * @options: Option to be enabled/disabled
499 * The Axi Ethernet core has multiple features which can be selectively turned
500 * on or off. The typical options could be jumbo frame option, basic VLAN
501 * option, promiscuous mode option etc. This function is used to set or clear
502 * these options in the Axi Ethernet hardware. This is done through the
503 * axienet_option structure.
505 static void axienet_setoptions(struct net_device *ndev, u32 options)
508 struct axienet_local *lp = netdev_priv(ndev);
509 struct axienet_option *tp = &axienet_options[0];
512 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
513 if (options & tp->opt)
515 axienet_iow(lp, tp->reg, reg);
519 lp->options |= options;
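/* Read one hardware statistics counter, returning the 64-bit running total:
 * the saved software base plus the delta since the counters were last
 * latched. Called under the hw_stats_seqcount read side so a consistent
 * base/last pair is seen.
 */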
522 static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
526 if (lp->reset_in_progress)
527 return lp->hw_stat_base[stat];
529 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
530 return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
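/* Latch the current hardware counters into the 64-bit software base so the
 * 32-bit hardware counters can wrap (or be reset) without losing counts.
 */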
533 static void axienet_stats_update(struct axienet_local *lp, bool reset)
535 enum temac_stat stat;
537 write_seqcount_begin(&lp->hw_stats_seqcount);
538 lp->reset_in_progress = reset;
539 for (stat = 0; stat < STAT_COUNT; stat++) {
540 u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
542 lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
543 lp->hw_last_counter[stat] = counter;
545 write_seqcount_end(&lp->hw_stats_seqcount);
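/* Periodic worker that re-latches the hardware counters before they can wrap. */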
548 static void axienet_refresh_stats(struct work_struct *work)
550 struct axienet_local *lp = container_of(work, struct axienet_local,
553 mutex_lock(&lp->stats_lock);
554 axienet_stats_update(lp, false);
555 mutex_unlock(&lp->stats_lock);
557 /* Just less than 2^32 bytes at 2.5 GBit/s */
558 schedule_delayed_work(&lp->stats_work, 13 * HZ);
561 static int __axienet_device_reset(struct axienet_local *lp)
566 /* Save statistics counters in case they will be reset */
567 mutex_lock(&lp->stats_lock);
568 if (lp->features & XAE_FEATURE_STATS)
569 axienet_stats_update(lp, true);
571 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
572 * process of Axi DMA takes a while to complete as all pending
573 * commands/transfers will be flushed or completed during this
575 * Note that even though both TX and RX have their own reset register,
576 * they both reset the entire DMA core, so only one needs to be used.
578 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
579 ret = read_poll_timeout(axienet_dma_in32, value,
580 !(value & XAXIDMA_CR_RESET_MASK),
581 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
582 XAXIDMA_TX_CR_OFFSET);
584 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
588 /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
589 ret = read_poll_timeout(axienet_ior, value,
590 value & XAE_INT_PHYRSTCMPLT_MASK,
591 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
594 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
598 /* Update statistics counters with new values */
599 if (lp->features & XAE_FEATURE_STATS) {
600 enum temac_stat stat;
602 write_seqcount_begin(&lp->hw_stats_seqcount);
603 lp->reset_in_progress = false;
604 for (stat = 0; stat < STAT_COUNT; stat++) {
606 axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
608 lp->hw_stat_base[stat] +=
609 lp->hw_last_counter[stat] - counter;
610 lp->hw_last_counter[stat] = counter;
612 write_seqcount_end(&lp->hw_stats_seqcount);
616 mutex_unlock(&lp->stats_lock);
621 * axienet_dma_stop - Stop DMA operation
622 * @lp: Pointer to the axienet_local structure
624 static void axienet_dma_stop(struct axienet_local *lp)
629 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
630 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
631 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
632 synchronize_irq(lp->rx_irq);
634 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
635 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
636 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
637 synchronize_irq(lp->tx_irq);
639 /* Give DMAs a chance to halt gracefully */
640 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
641 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
643 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
646 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
647 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
649 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
652 /* Do a reset to ensure DMA is really stopped */
653 axienet_lock_mii(lp);
654 __axienet_device_reset(lp);
655 axienet_unlock_mii(lp);
659 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
660 * @ndev: Pointer to the net_device structure
662 * This function is called to reset and initialize the Axi Ethernet core. This
663 * is typically called during initialization. It does a reset of the Axi DMA
664 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
665 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
666 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
668 * Returns 0 on success or a negative error number otherwise.
670 static int axienet_device_reset(struct net_device *ndev)
673 struct axienet_local *lp = netdev_priv(ndev);
676 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
677 lp->options |= XAE_OPTION_VLAN;
678 lp->options &= (~XAE_OPTION_JUMBO);
680 if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
681 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
684 if (lp->max_frm_size <= lp->rxmem)
685 lp->options |= XAE_OPTION_JUMBO;
688 if (!lp->use_dmaengine) {
689 ret = __axienet_device_reset(lp);
693 ret = axienet_dma_bd_init(ndev);
695 netdev_err(ndev, "%s: descriptor allocation failed\n",
701 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
702 axienet_status &= ~XAE_RCW1_RX_MASK;
703 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
705 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
706 if (axienet_status & XAE_INT_RXRJECT_MASK)
707 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
708 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
709 XAE_INT_RECV_ERROR_MASK : 0);
711 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
713 /* Sync default options with HW but leave receiver and
714 * transmitter disabled.
716 axienet_setoptions(ndev, lp->options &
717 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
718 axienet_set_mac_address(ndev, NULL);
719 axienet_set_multicast_list(ndev);
720 axienet_setoptions(ndev, lp->options);
722 netif_trans_update(ndev);
728 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
729 * @lp: Pointer to the axienet_local structure
730 * @first_bd: Index of first descriptor to clean up
731 * @nr_bds: Max number of descriptors to clean up
732 * @force: Whether to clean descriptors even if not complete
733 * @sizep: Pointer to a u32 filled with the total sum of all bytes
734 * in all cleaned-up descriptors. Ignored if NULL.
735 * @budget: NAPI budget (use 0 when not called from NAPI poll)
737 * Called either after a successful transmit operation, or after there was
738 * an error when setting up the chain.
739 * Returns the number of packets handled.
741 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
742 int nr_bds, bool force, u32 *sizep, int budget)
744 struct axidma_bd *cur_p;
749 for (i = 0; i < nr_bds; i++) {
750 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
751 status = cur_p->status;
753 /* If force is not specified, clean up only descriptors
754 * that have been completed by the MAC.
756 if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
759 /* Ensure we see complete descriptor update */
761 phys = desc_get_phys_addr(lp, cur_p);
762 dma_unmap_single(lp->dev, phys,
763 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
766 if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
767 napi_consume_skb(cur_p->skb, budget);
776 /* ensure our transmit path and device don't prematurely see status cleared */
782 *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
787 if (lp->tx_bd_ci >= lp->tx_bd_num)
788 lp->tx_bd_ci %= lp->tx_bd_num;
795 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
796 * @lp: Pointer to the axienet_local structure
797 * @num_frag: The number of BDs to check for
799 * Return: 0, on success
800 * NETDEV_TX_BUSY, if any of the descriptors are not free
802 * This function is invoked before BDs are allocated and transmission starts.
803 * This function returns 0 if a BD or group of BDs can be allocated for
804 * transmission. If the BD or any of the BDs are not free the function
805 * returns a busy status.
807 static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
810 struct axidma_bd *cur_p;
812 /* Ensure we see all descriptor updates from device or TX polling */
814 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
817 return NETDEV_TX_BUSY;
822 * axienet_dma_tx_cb - DMA engine callback for TX channel.
823 * @data: Pointer to the axienet_local structure.
824 * @result: error reporting through dmaengine_result.
825 * This function is called by the dmaengine driver for the TX channel to
826 * notify that the transmit is done.
828 static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
830 struct skbuf_dma_descriptor *skbuf_dma;
831 struct axienet_local *lp = data;
832 struct netdev_queue *txq;
835 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
836 len = skbuf_dma->skb->len;
837 txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
838 u64_stats_update_begin(&lp->tx_stat_sync);
839 u64_stats_add(&lp->tx_bytes, len);
840 u64_stats_add(&lp->tx_packets, 1);
841 u64_stats_update_end(&lp->tx_stat_sync);
842 dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
843 dev_consume_skb_any(skbuf_dma->skb);
844 netif_txq_completed_wake(txq, 1, len,
845 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
850 * axienet_start_xmit_dmaengine - Starts the transmission.
851 * @skb: sk_buff pointer that contains data to be Txed.
852 * @ndev: Pointer to net_device structure.
854 * Return: NETDEV_TX_OK on success or when the packet is dropped for a non-space error.
855 * NETDEV_TX_BUSY when no free element is available in the TX skb ring buffer.
858 * This function is invoked to initiate transmission. It maps the skb into a
859 * scatterlist, registers the DMA completion callback and submits
860 * the DMA transaction.
861 * Additionally if checksum offloading is supported,
862 * it populates AXI Stream Control fields with appropriate values.
865 axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
867 struct dma_async_tx_descriptor *dma_tx_desc = NULL;
868 struct axienet_local *lp = netdev_priv(ndev);
869 u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
870 struct skbuf_dma_descriptor *skbuf_dma;
871 struct dma_device *dma_dev;
872 struct netdev_queue *txq;
878 dma_dev = lp->tx_chan->device;
879 sg_len = skb_shinfo(skb)->nr_frags + 1;
880 if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
881 netif_stop_queue(ndev);
883 netdev_warn(ndev, "TX ring unexpectedly full\n");
884 return NETDEV_TX_BUSY;
887 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
889 goto xmit_error_drop_skb;
892 sg_init_table(skbuf_dma->sgl, sg_len);
893 ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
895 goto xmit_error_drop_skb;
897 ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
899 goto xmit_error_drop_skb;
901 /* Fill up app fields for checksum */
902 if (skb->ip_summed == CHECKSUM_PARTIAL) {
903 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
904 /* Tx Full Checksum Offload Enabled */
905 app_metadata[0] |= 2;
906 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
907 csum_start_off = skb_transport_offset(skb);
908 csum_index_off = csum_start_off + skb->csum_offset;
909 /* Tx Partial Checksum Offload Enabled */
910 app_metadata[0] |= 1;
911 app_metadata[1] = (csum_start_off << 16) | csum_index_off;
913 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
914 app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
917 dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
918 sg_len, DMA_MEM_TO_DEV,
919 DMA_PREP_INTERRUPT, (void *)app_metadata);
921 goto xmit_error_unmap_sg;
923 skbuf_dma->skb = skb;
924 skbuf_dma->sg_len = sg_len;
925 dma_tx_desc->callback_param = lp;
926 dma_tx_desc->callback_result = axienet_dma_tx_cb;
927 txq = skb_get_tx_queue(lp->ndev, skb);
928 netdev_tx_sent_queue(txq, skb->len);
929 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
930 MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
932 dmaengine_submit(dma_tx_desc);
933 dma_async_issue_pending(lp->tx_chan);
937 dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
939 dev_kfree_skb_any(skb);
944 * axienet_tx_poll - Invoked once a transmit is completed by the
945 * Axi DMA Tx channel.
946 * @napi: Pointer to NAPI structure.
947 * @budget: Max number of TX packets to process.
949 * Return: Number of TX packets processed.
951 * This function is invoked from the NAPI processing to notify the completion
952 * of transmit operation. It clears fields in the corresponding Tx BDs and
953 * unmaps the corresponding buffer so that CPU can regain ownership of the
954 * buffer. It finally invokes "netif_wake_queue" to restart transmission if required.
957 static int axienet_tx_poll(struct napi_struct *napi, int budget)
959 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
960 struct net_device *ndev = lp->ndev;
964 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
968 u64_stats_update_begin(&lp->tx_stat_sync);
969 u64_stats_add(&lp->tx_packets, packets);
970 u64_stats_add(&lp->tx_bytes, size);
971 u64_stats_update_end(&lp->tx_stat_sync);
973 /* Matches barrier in axienet_start_xmit */
976 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
977 netif_wake_queue(ndev);
980 if (packets < budget && napi_complete_done(napi, packets)) {
981 /* Re-enable TX completion interrupts. This should
982 * cause an immediate interrupt if any TX packets are
985 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
991 * axienet_start_xmit - Starts the transmission.
992 * @skb: sk_buff pointer that contains data to be Txed.
993 * @ndev: Pointer to net_device structure.
995 * Return: NETDEV_TX_OK, on success
996 * NETDEV_TX_BUSY, if any of the descriptors are not free
998 * This function is invoked from upper layers to initiate transmission. The
999 * function uses the next available free BDs and populates their fields to
1000 * start the transmission. Additionally if checksum offloading is supported,
1001 * it populates AXI Stream Control fields with appropriate values.
1004 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1011 dma_addr_t tail_p, phys;
1012 u32 orig_tail_ptr, new_tail_ptr;
1013 struct axienet_local *lp = netdev_priv(ndev);
1014 struct axidma_bd *cur_p;
1016 orig_tail_ptr = lp->tx_bd_tail;
1017 new_tail_ptr = orig_tail_ptr;
1019 num_frag = skb_shinfo(skb)->nr_frags;
1020 cur_p = &lp->tx_bd_v[orig_tail_ptr];
1022 if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1023 /* Should not happen as last start_xmit call should have
1024 * checked for sufficient space and queue should only be
1025 * woken when sufficient space is available.
1027 netif_stop_queue(ndev);
1028 if (net_ratelimit())
1029 netdev_warn(ndev, "TX ring unexpectedly full\n");
1030 return NETDEV_TX_BUSY;
1033 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1034 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1035 /* Tx Full Checksum Offload Enabled */
1037 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1038 csum_start_off = skb_transport_offset(skb);
1039 csum_index_off = csum_start_off + skb->csum_offset;
1040 /* Tx Partial Checksum Offload Enabled */
1042 cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1044 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1045 cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1048 phys = dma_map_single(lp->dev, skb->data,
1049 skb_headlen(skb), DMA_TO_DEVICE);
1050 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1051 if (net_ratelimit())
1052 netdev_err(ndev, "TX DMA mapping error\n");
1053 ndev->stats.tx_dropped++;
1054 dev_kfree_skb_any(skb);
1055 return NETDEV_TX_OK;
1057 desc_set_phys_addr(lp, phys, cur_p);
1058 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1060 for (ii = 0; ii < num_frag; ii++) {
1061 if (++new_tail_ptr >= lp->tx_bd_num)
1063 cur_p = &lp->tx_bd_v[new_tail_ptr];
1064 frag = &skb_shinfo(skb)->frags[ii];
1065 phys = dma_map_single(lp->dev,
1066 skb_frag_address(frag),
1067 skb_frag_size(frag),
1069 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1070 if (net_ratelimit())
1071 netdev_err(ndev, "TX DMA mapping error\n");
1072 ndev->stats.tx_dropped++;
1073 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1075 dev_kfree_skb_any(skb);
1076 return NETDEV_TX_OK;
1078 desc_set_phys_addr(lp, phys, cur_p);
1079 cur_p->cntrl = skb_frag_size(frag);
1082 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1085 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1086 if (++new_tail_ptr >= lp->tx_bd_num)
1088 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1090 /* Start the transfer */
1091 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1093 /* Stop queue if next transmit may not have space */
1094 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1095 netif_stop_queue(ndev);
1097 /* Matches barrier in axienet_tx_poll */
1100 /* Space might have just been freed - check again */
1101 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1102 netif_wake_queue(ndev);
1105 return NETDEV_TX_OK;
1109 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1110 * @data: Pointer to the axienet_local structure.
1111 * @result: error reporting through dmaengine_result.
1112 * This function is called by the dmaengine driver for the RX channel to
1113 * notify that a packet has been received.
1115 static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1117 struct skbuf_dma_descriptor *skbuf_dma;
1118 size_t meta_len, meta_max_len, rx_len;
1119 struct axienet_local *lp = data;
1120 struct sk_buff *skb;
1123 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1124 skb = skbuf_dma->skb;
1125 app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1127 dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1129 /* TODO: Derive app word index programmatically */
1130 rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1131 skb_put(skb, rx_len);
1132 skb->protocol = eth_type_trans(skb, lp->ndev);
1133 skb->ip_summed = CHECKSUM_NONE;
1136 u64_stats_update_begin(&lp->rx_stat_sync);
1137 u64_stats_add(&lp->rx_packets, 1);
1138 u64_stats_add(&lp->rx_bytes, rx_len);
1139 u64_stats_update_end(&lp->rx_stat_sync);
1140 axienet_rx_submit_desc(lp->ndev);
1141 dma_async_issue_pending(lp->rx_chan);
1145 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1146 * @napi: Pointer to NAPI structure.
1147 * @budget: Max number of RX packets to process.
1149 * Return: Number of RX packets processed.
1151 static int axienet_rx_poll(struct napi_struct *napi, int budget)
1157 dma_addr_t tail_p = 0;
1158 struct axidma_bd *cur_p;
1159 struct sk_buff *skb, *new_skb;
1160 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1162 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1164 while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1167 /* Ensure we see complete descriptor update */
1173 /* skb could be NULL if a previous pass already received the
1174 * packet for this slot in the ring, but failed to refill it
1175 * with a newly allocated buffer. In this case, don't try to
1179 length = cur_p->app4 & 0x0000FFFF;
1181 phys = desc_get_phys_addr(lp, cur_p);
1182 dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1185 skb_put(skb, length);
1186 skb->protocol = eth_type_trans(skb, lp->ndev);
1187 /*skb_checksum_none_assert(skb);*/
1188 skb->ip_summed = CHECKSUM_NONE;
1190 /* if we're doing Rx csum offload, set it up */
1191 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1192 csumstatus = (cur_p->app2 &
1193 XAE_FULL_CSUM_STATUS_MASK) >> 3;
1194 if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1195 csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1196 skb->ip_summed = CHECKSUM_UNNECESSARY;
1198 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1199 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1200 skb->ip_summed = CHECKSUM_COMPLETE;
1203 napi_gro_receive(napi, skb);
1209 new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1213 phys = dma_map_single(lp->dev, new_skb->data,
1216 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1217 if (net_ratelimit())
1218 netdev_err(lp->ndev, "RX DMA mapping error\n");
1219 dev_kfree_skb(new_skb);
1222 desc_set_phys_addr(lp, phys, cur_p);
1224 cur_p->cntrl = lp->max_frm_size;
1226 cur_p->skb = new_skb;
1228 /* Only update tail_p to mark this slot as usable after it has
1229 * been successfully refilled.
1231 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1233 if (++lp->rx_bd_ci >= lp->rx_bd_num)
1235 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1238 u64_stats_update_begin(&lp->rx_stat_sync);
1239 u64_stats_add(&lp->rx_packets, packets);
1240 u64_stats_add(&lp->rx_bytes, size);
1241 u64_stats_update_end(&lp->rx_stat_sync);
1244 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1246 if (packets < budget && napi_complete_done(napi, packets)) {
1247 /* Re-enable RX completion interrupts. This should
1248 * cause an immediate interrupt if any RX packets are
1251 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1257 * axienet_tx_irq - Tx Done Isr.
1259 * @_ndev: net_device pointer
1261 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1263 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1266 static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1268 unsigned int status;
1269 struct net_device *ndev = _ndev;
1270 struct axienet_local *lp = netdev_priv(ndev);
1272 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1274 if (!(status & XAXIDMA_IRQ_ALL_MASK))
1277 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1279 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1280 netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1281 netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1282 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1283 (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1284 schedule_work(&lp->dma_err_task);
1286 /* Disable further TX completion interrupts and schedule
1287 * NAPI to handle the completions.
1289 u32 cr = lp->tx_dma_cr;
1291 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1292 if (napi_schedule_prep(&lp->napi_tx)) {
1293 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1294 __napi_schedule(&lp->napi_tx);
1302 * axienet_rx_irq - Rx Isr.
1304 * @_ndev: net_device pointer
1306 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1308 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1311 static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1313 unsigned int status;
1314 struct net_device *ndev = _ndev;
1315 struct axienet_local *lp = netdev_priv(ndev);
1317 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1319 if (!(status & XAXIDMA_IRQ_ALL_MASK))
1322 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1324 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1325 netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1326 netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1327 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1328 (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1329 schedule_work(&lp->dma_err_task);
1331 /* Disable further RX completion interrupts and schedule
1334 u32 cr = lp->rx_dma_cr;
1336 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1337 if (napi_schedule_prep(&lp->napi_rx)) {
1338 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1339 __napi_schedule(&lp->napi_rx);
1347 * axienet_eth_irq - Ethernet core Isr.
1349 * @_ndev: net_device pointer
1351 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1353 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1355 static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1357 struct net_device *ndev = _ndev;
1358 struct axienet_local *lp = netdev_priv(ndev);
1359 unsigned int pending;
1361 pending = axienet_ior(lp, XAE_IP_OFFSET);
1365 if (pending & XAE_INT_RXFIFOOVR_MASK)
1366 ndev->stats.rx_missed_errors++;
1368 if (pending & XAE_INT_RXRJECT_MASK)
1369 ndev->stats.rx_dropped++;
1371 axienet_iow(lp, XAE_IS_OFFSET, pending);
1375 static void axienet_dma_err_handler(struct work_struct *work);
1378 * axienet_rx_submit_desc - Submit an rx descriptor to the dmaengine.
1379 * Allocate an skbuff, map it into the scatterlist, obtain a descriptor,
1380 * then add the callback information and submit the descriptor.
1382 * @ndev: net_device pointer
1385 static void axienet_rx_submit_desc(struct net_device *ndev)
1387 struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1388 struct axienet_local *lp = netdev_priv(ndev);
1389 struct skbuf_dma_descriptor *skbuf_dma;
1390 struct sk_buff *skb;
1393 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1398 skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1402 sg_init_table(skbuf_dma->sgl, 1);
1403 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1404 if (unlikely(dma_mapping_error(lp->dev, addr))) {
1405 if (net_ratelimit())
1406 netdev_err(ndev, "DMA mapping error\n");
1407 goto rx_submit_err_free_skb;
1409 sg_dma_address(skbuf_dma->sgl) = addr;
1410 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1411 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1413 DMA_PREP_INTERRUPT);
1415 goto rx_submit_err_unmap_skb;
1417 skbuf_dma->skb = skb;
1418 skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1419 skbuf_dma->desc = dma_rx_desc;
1420 dma_rx_desc->callback_param = lp;
1421 dma_rx_desc->callback_result = axienet_dma_rx_cb;
1422 dmaengine_submit(dma_rx_desc);
1426 rx_submit_err_unmap_skb:
1427 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1428 rx_submit_err_free_skb:
1433 * axienet_init_dmaengine - init the dmaengine code.
1434 * @ndev: Pointer to net_device structure
1436 * Return: 0, on success.
1437 * non-zero error value on failure
1439 * This is the dmaengine initialization code.
1441 static int axienet_init_dmaengine(struct net_device *ndev)
1443 struct axienet_local *lp = netdev_priv(ndev);
1444 struct skbuf_dma_descriptor *skbuf_dma;
1447 lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1448 if (IS_ERR(lp->tx_chan)) {
1449 dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1450 return PTR_ERR(lp->tx_chan);
1453 lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1454 if (IS_ERR(lp->rx_chan)) {
1455 ret = PTR_ERR(lp->rx_chan);
1456 dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1457 goto err_dma_release_tx;
1460 lp->tx_ring_tail = 0;
1461 lp->tx_ring_head = 0;
1462 lp->rx_ring_tail = 0;
1463 lp->rx_ring_head = 0;
1464 lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1466 if (!lp->tx_skb_ring) {
1468 goto err_dma_release_rx;
1470 for (i = 0; i < TX_BD_NUM_MAX; i++) {
1471 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1474 goto err_free_tx_skb_ring;
1476 lp->tx_skb_ring[i] = skbuf_dma;
1479 lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1481 if (!lp->rx_skb_ring) {
1483 goto err_free_tx_skb_ring;
1485 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1486 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1489 goto err_free_rx_skb_ring;
1491 lp->rx_skb_ring[i] = skbuf_dma;
1493 /* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1494 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1495 axienet_rx_submit_desc(ndev);
1496 dma_async_issue_pending(lp->rx_chan);
1500 err_free_rx_skb_ring:
1501 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1502 kfree(lp->rx_skb_ring[i]);
1503 kfree(lp->rx_skb_ring);
1504 err_free_tx_skb_ring:
1505 for (i = 0; i < TX_BD_NUM_MAX; i++)
1506 kfree(lp->tx_skb_ring[i]);
1507 kfree(lp->tx_skb_ring);
1509 dma_release_channel(lp->rx_chan);
1511 dma_release_channel(lp->tx_chan);
1516 * axienet_init_legacy_dma - init the dma legacy code.
1517 * @ndev: Pointer to net_device structure
1519 * Return: 0, on success.
1520 * non-zero error value on failure
1522 * This is the legacy DMA initialization code. It also registers the interrupt
1523 * service routines and enables the interrupt lines and ISR handling.
1526 static int axienet_init_legacy_dma(struct net_device *ndev)
1529 struct axienet_local *lp = netdev_priv(ndev);
1531 /* Enable worker thread for Axi DMA error handling */
1532 lp->stopping = false;
1533 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1535 napi_enable(&lp->napi_rx);
1536 napi_enable(&lp->napi_tx);
1538 /* Enable interrupts for Axi DMA Tx */
1539 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1543 /* Enable interrupts for Axi DMA Rx */
1544 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1548 /* Enable interrupts for Axi Ethernet core (if defined) */
1549 if (lp->eth_irq > 0) {
1550 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1559 free_irq(lp->rx_irq, ndev);
1561 free_irq(lp->tx_irq, ndev);
1563 napi_disable(&lp->napi_tx);
1564 napi_disable(&lp->napi_rx);
1565 cancel_work_sync(&lp->dma_err_task);
1566 dev_err(lp->dev, "request_irq() failed\n");
1571 * axienet_open - Driver open routine.
1572 * @ndev: Pointer to net_device structure
1574 * Return: 0, on success.
1575 * non-zero error value on failure
1577 * This is the driver open routine. It calls phylink_start to start the PHY
1579 * device. It also registers the interrupt service routines, enables the interrupt lines
1580 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1581 * descriptors are initialized.
1583 static int axienet_open(struct net_device *ndev)
1586 struct axienet_local *lp = netdev_priv(ndev);
1588 /* When we do an Axi Ethernet reset, it resets the complete core
1589 * including the MDIO. MDIO must be disabled before resetting.
1590 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1592 axienet_lock_mii(lp);
1593 ret = axienet_device_reset(ndev);
1594 axienet_unlock_mii(lp);
1596 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1598 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1602 phylink_start(lp->phylink);
1604 /* Start the statistics refresh work */
1605 schedule_delayed_work(&lp->stats_work, 0);
1607 if (lp->use_dmaengine) {
1608 /* Enable interrupts for Axi Ethernet core (if defined) */
1609 if (lp->eth_irq > 0) {
1610 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1616 ret = axienet_init_dmaengine(ndev);
1618 goto err_free_eth_irq;
1620 ret = axienet_init_legacy_dma(ndev);
1628 if (lp->eth_irq > 0)
1629 free_irq(lp->eth_irq, ndev);
1631 cancel_delayed_work_sync(&lp->stats_work);
1632 phylink_stop(lp->phylink);
1633 phylink_disconnect_phy(lp->phylink);
1638 * axienet_stop - Driver stop routine.
1639 * @ndev: Pointer to net_device structure
1641 * Return: 0, on success.
1643 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1644 * device. It also removes the interrupt handlers and disables the interrupts.
1645 * The Axi DMA Tx/Rx BDs are released.
1647 static int axienet_stop(struct net_device *ndev)
1649 struct axienet_local *lp = netdev_priv(ndev);
1652 if (!lp->use_dmaengine) {
1653 WRITE_ONCE(lp->stopping, true);
1654 flush_work(&lp->dma_err_task);
1656 napi_disable(&lp->napi_tx);
1657 napi_disable(&lp->napi_rx);
1660 cancel_delayed_work_sync(&lp->stats_work);
1662 phylink_stop(lp->phylink);
1663 phylink_disconnect_phy(lp->phylink);
1665 axienet_setoptions(ndev, lp->options &
1666 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1668 if (!lp->use_dmaengine) {
1669 axienet_dma_stop(lp);
1670 cancel_work_sync(&lp->dma_err_task);
1671 free_irq(lp->tx_irq, ndev);
1672 free_irq(lp->rx_irq, ndev);
1673 axienet_dma_bd_release(ndev);
1675 dmaengine_terminate_sync(lp->tx_chan);
1676 dmaengine_synchronize(lp->tx_chan);
1677 dmaengine_terminate_sync(lp->rx_chan);
1678 dmaengine_synchronize(lp->rx_chan);
1680 for (i = 0; i < TX_BD_NUM_MAX; i++)
1681 kfree(lp->tx_skb_ring[i]);
1682 kfree(lp->tx_skb_ring);
1683 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1684 kfree(lp->rx_skb_ring[i]);
1685 kfree(lp->rx_skb_ring);
1687 dma_release_channel(lp->rx_chan);
1688 dma_release_channel(lp->tx_chan);
1691 axienet_iow(lp, XAE_IE_OFFSET, 0);
1693 if (lp->eth_irq > 0)
1694 free_irq(lp->eth_irq, ndev);
1699 * axienet_change_mtu - Driver change mtu routine.
1700 * @ndev: Pointer to net_device structure
1701 * @new_mtu: New mtu value to be applied
1703 * Return: 0 on success, or a negative error if the device is running or the new MTU does not fit in the receive buffer.
1705 * This is the change mtu driver routine. It checks if the Axi Ethernet
1706 * hardware supports jumbo frames before changing the mtu. This can be
1707 * called only when the device is not up.
1709 static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1711 struct axienet_local *lp = netdev_priv(ndev);
1713 if (netif_running(ndev))
1716 if ((new_mtu + VLAN_ETH_HLEN +
1717 XAE_TRL_SIZE) > lp->rxmem)
1720 WRITE_ONCE(ndev->mtu, new_mtu);
1725 #ifdef CONFIG_NET_POLL_CONTROLLER
1727 * axienet_poll_controller - Axi Ethernet poll mechanism.
1728 * @ndev: Pointer to net_device structure
1730 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1731 * to polling the ISRs and are enabled back after the polling is done.
1733 static void axienet_poll_controller(struct net_device *ndev)
1735 struct axienet_local *lp = netdev_priv(ndev);
1737 disable_irq(lp->tx_irq);
1738 disable_irq(lp->rx_irq);
1739 axienet_rx_irq(lp->rx_irq, ndev);
1740 axienet_tx_irq(lp->tx_irq, ndev);
1741 enable_irq(lp->tx_irq);
1742 enable_irq(lp->rx_irq);
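/* Pass MII ioctls through to phylink; only valid while the interface is up. */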
1746 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1748 struct axienet_local *lp = netdev_priv(dev);
1750 if (!netif_running(dev))
1753 return phylink_mii_ioctl(lp->phylink, rq, cmd);
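/* Fill in rtnl_link_stats64 from the software packet/byte counters and, when
 * the core exposes a statistics block, from the hardware error counters.
 */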
1757 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1759 struct axienet_local *lp = netdev_priv(dev);
1762 netdev_stats_to_stats64(stats, &dev->stats);
1765 start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1766 stats->rx_packets = u64_stats_read(&lp->rx_packets);
1767 stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1768 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1771 start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1772 stats->tx_packets = u64_stats_read(&lp->tx_packets);
1773 stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1774 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1776 if (!(lp->features & XAE_FEATURE_STATS))
1780 start = read_seqcount_begin(&lp->hw_stats_seqcount);
1781 stats->rx_length_errors =
1782 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1783 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1784 stats->rx_frame_errors =
1785 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1786 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1787 axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1788 stats->rx_length_errors +
1789 stats->rx_crc_errors +
1790 stats->rx_frame_errors;
1791 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1793 stats->tx_aborted_errors =
1794 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1795 stats->tx_fifo_errors =
1796 axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1797 stats->tx_window_errors =
1798 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1799 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1800 stats->tx_aborted_errors +
1801 stats->tx_fifo_errors +
1802 stats->tx_window_errors;
1803 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1806 static const struct net_device_ops axienet_netdev_ops = {
1807 .ndo_open = axienet_open,
1808 .ndo_stop = axienet_stop,
1809 .ndo_start_xmit = axienet_start_xmit,
1810 .ndo_get_stats64 = axienet_get_stats64,
1811 .ndo_change_mtu = axienet_change_mtu,
1812 .ndo_set_mac_address = netdev_set_mac_address,
1813 .ndo_validate_addr = eth_validate_addr,
1814 .ndo_eth_ioctl = axienet_ioctl,
1815 .ndo_set_rx_mode = axienet_set_multicast_list,
1816 #ifdef CONFIG_NET_POLL_CONTROLLER
1817 .ndo_poll_controller = axienet_poll_controller,
1821 static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1822 .ndo_open = axienet_open,
1823 .ndo_stop = axienet_stop,
1824 .ndo_start_xmit = axienet_start_xmit_dmaengine,
1825 .ndo_get_stats64 = axienet_get_stats64,
1826 .ndo_change_mtu = axienet_change_mtu,
1827 .ndo_set_mac_address = netdev_set_mac_address,
1828 .ndo_validate_addr = eth_validate_addr,
1829 .ndo_eth_ioctl = axienet_ioctl,
1830 .ndo_set_rx_mode = axienet_set_multicast_list,
1834 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1835 * @ndev: Pointer to net_device structure
1836 * @ed: Pointer to ethtool_drvinfo structure
1838 * This implements ethtool command for getting the driver information.
1839 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1841 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1842 struct ethtool_drvinfo *ed)
1844 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1845 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1849 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1851 * @ndev: Pointer to net_device structure
1853 * This implements ethtool command for getting the total register length
1856 * Return: the total regs length
1858 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1860 return sizeof(u32) * AXIENET_REGS_N;
1864 * axienet_ethtools_get_regs - Dump the contents of all registers present
1865 * in AxiEthernet core.
1866 * @ndev: Pointer to net_device structure
1867 * @regs: Pointer to ethtool_regs structure
1868 * @ret: Void pointer used to return the contents of the registers.
1870 * This implements ethtool command for getting the Axi Ethernet register dump.
1871 * Issue "ethtool -d ethX" to execute this function.
1873 static void axienet_ethtools_get_regs(struct net_device *ndev,
1874 struct ethtool_regs *regs, void *ret)
1876 u32 *data = (u32 *)ret;
1877 size_t len = sizeof(u32) * AXIENET_REGS_N;
1878 struct axienet_local *lp = netdev_priv(ndev);
1883 memset(data, 0, len);
1884 data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1885 data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1886 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1887 data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1888 data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1889 data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1890 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1891 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1892 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1893 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1894 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1895 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1896 data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1897 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1898 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1899 data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1900 data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1901 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1902 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1903 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1904 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1905 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1906 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1907 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1908 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1909 data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1910 data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1911 data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1912 if (!lp->use_dmaengine) {
1913 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1914 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1915 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1916 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1917 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1918 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1919 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1920 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1925 axienet_ethtools_get_ringparam(struct net_device *ndev,
1926 struct ethtool_ringparam *ering,
1927 struct kernel_ethtool_ringparam *kernel_ering,
1928 struct netlink_ext_ack *extack)
1930 struct axienet_local *lp = netdev_priv(ndev);
1932 ering->rx_max_pending = RX_BD_NUM_MAX;
1933 ering->rx_mini_max_pending = 0;
1934 ering->rx_jumbo_max_pending = 0;
1935 ering->tx_max_pending = TX_BD_NUM_MAX;
1936 ering->rx_pending = lp->rx_bd_num;
1937 ering->rx_mini_pending = 0;
1938 ering->rx_jumbo_pending = 0;
1939 ering->tx_pending = lp->tx_bd_num;
1943 axienet_ethtools_set_ringparam(struct net_device *ndev,
1944 struct ethtool_ringparam *ering,
1945 struct kernel_ethtool_ringparam *kernel_ering,
1946 struct netlink_ext_ack *extack)
1948 struct axienet_local *lp = netdev_priv(ndev);
1950 if (ering->rx_pending > RX_BD_NUM_MAX ||
1951 ering->rx_mini_pending ||
1952 ering->rx_jumbo_pending ||
1953 ering->tx_pending < TX_BD_NUM_MIN ||
1954 ering->tx_pending > TX_BD_NUM_MAX)
1957 if (netif_running(ndev))
1960 lp->rx_bd_num = ering->rx_pending;
1961 lp->tx_bd_num = ering->tx_pending;
1966 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1968 * @ndev: Pointer to net_device structure
1969 * @epauseparm: Pointer to ethtool_pauseparam structure.
1971 * This implements ethtool command for getting axi ethernet pause frame
1972 * setting. Issue "ethtool -a ethX" to execute this function.
1975 axienet_ethtools_get_pauseparam(struct net_device *ndev,
1976 struct ethtool_pauseparam *epauseparm)
1978 struct axienet_local *lp = netdev_priv(ndev);
1980 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1984 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1986 * @ndev: Pointer to net_device structure
1987 * @epauseparm: Pointer to ethtool_pauseparam structure
1989 * This implements ethtool command for enabling flow control on Rx and Tx
1990 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1993 * Return: 0 on success, -EFAULT if device is running
1996 axienet_ethtools_set_pauseparam(struct net_device *ndev,
1997 struct ethtool_pauseparam *epauseparm)
1999 struct axienet_local *lp = netdev_priv(ndev);
2001 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2005 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2006 * @ndev: Pointer to net_device structure
2007 * @ecoalesce: Pointer to ethtool_coalesce structure
2008 * @kernel_coal: ethtool CQE mode setting structure
2009 * @extack: extack for reporting error messages
2011 * This implements the ethtool command for getting the DMA interrupt coalescing
2012 * count on the Tx and Rx paths. Issue "ethtool -c ethX" at the Linux prompt to
2013 * execute this function.
2018 axienet_ethtools_get_coalesce(struct net_device *ndev,
2019 struct ethtool_coalesce *ecoalesce,
2020 struct kernel_ethtool_coalesce *kernel_coal,
2021 struct netlink_ext_ack *extack)
2023 struct axienet_local *lp = netdev_priv(ndev);
2025 ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
2026 ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
2027 ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
2028 ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
2033 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2034 * @ndev: Pointer to net_device structure
2035 * @ecoalesce: Pointer to ethtool_coalesce structure
2036 * @kernel_coal: ethtool CQE mode setting structure
2037 * @extack: extack for reporting error messages
2039 * This implements the ethtool command for setting the DMA interrupt coalescing
2040 * count on the Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" at the
2041 * Linux prompt to execute this function.
2043 * Return: 0 on success, non-zero error value on failure.
2046 axienet_ethtools_set_coalesce(struct net_device *ndev,
2047 struct ethtool_coalesce *ecoalesce,
2048 struct kernel_ethtool_coalesce *kernel_coal,
2049 struct netlink_ext_ack *extack)
2051 struct axienet_local *lp = netdev_priv(ndev);
2053 if (netif_running(ndev)) {
2054 NL_SET_ERR_MSG(extack,
2055 "Please stop netif before applying configuration");
2056 return -EBUSY;
2057 }
2059 if (ecoalesce->rx_max_coalesced_frames)
2060 lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
2061 if (ecoalesce->rx_coalesce_usecs)
2062 lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
2063 if (ecoalesce->tx_max_coalesced_frames)
2064 lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
2065 if (ecoalesce->tx_coalesce_usecs)
2066 lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;
2072 axienet_ethtools_get_link_ksettings(struct net_device *ndev,
2073 struct ethtool_link_ksettings *cmd)
2075 struct axienet_local *lp = netdev_priv(ndev);
2077 return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2081 axienet_ethtools_set_link_ksettings(struct net_device *ndev,
2082 const struct ethtool_link_ksettings *cmd)
2084 struct axienet_local *lp = netdev_priv(ndev);
2086 return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2089 static int axienet_ethtools_nway_reset(struct net_device *dev)
2091 struct axienet_local *lp = netdev_priv(dev);
2093 return phylink_ethtool_nway_reset(lp->phylink);
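/**
 * axienet_ethtools_get_ethtool_stats - Read the extra hardware statistics.
 * @dev: Pointer to net_device structure
 * @stats: Pointer to ethtool_stats structure (unused)
 * @data: Output array, one u64 per entry in axienet_ethtool_stats_strings
 *
 * Fills the driver-specific counters reported by "ethtool -S ethX" from the
 * MAC's hardware statistics block.
 */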
2096 static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
2097 struct ethtool_stats *stats,
2100 struct axienet_local *lp = netdev_priv(dev);
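	/* All statistics readers below use the same pattern: retry the reads
	 * until hw_stats_seqcount shows that axienet_refresh_stats() did not
	 * update the software counters in the middle of the snapshot.
	 */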
2104 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2105 data[0] = axienet_stat(lp, STAT_RX_BYTES);
2106 data[1] = axienet_stat(lp, STAT_TX_BYTES);
2107 data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2108 data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2109 data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2110 data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2111 data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
2112 data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
2113 data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
2114 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2117 static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
2118 "Received bytes",
2119 "Transmitted bytes",
2120 "RX Good VLAN Tagged Frames",
2121 "TX Good VLAN Tagged Frames",
2122 "TX Good PFC Frames",
2123 "RX Good PFC Frames",
2124 "User Defined Counter 0",
2125 "User Defined Counter 1",
2126 "User Defined Counter 2",
2129 static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
2131 switch (stringset) {
2133 memcpy(data, axienet_ethtool_stats_strings,
2134 sizeof(axienet_ethtool_stats_strings));
2139 static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
2141 struct axienet_local *lp = netdev_priv(dev);
2145 if (lp->features & XAE_FEATURE_STATS)
2146 return ARRAY_SIZE(axienet_ethtool_stats_strings);
2154 axienet_ethtools_get_pause_stats(struct net_device *dev,
2155 struct ethtool_pause_stats *pause_stats)
2157 struct axienet_local *lp = netdev_priv(dev);
2160 if (!(lp->features & XAE_FEATURE_STATS))
2164 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2165 pause_stats->tx_pause_frames =
2166 axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2167 pause_stats->rx_pause_frames =
2168 axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2169 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2173 axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
2174 struct ethtool_eth_mac_stats *mac_stats)
2176 struct axienet_local *lp = netdev_priv(dev);
2179 if (!(lp->features & XAE_FEATURE_STATS))
2183 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2184 mac_stats->FramesTransmittedOK =
2185 axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2186 mac_stats->SingleCollisionFrames =
2187 axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2188 mac_stats->MultipleCollisionFrames =
2189 axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2190 mac_stats->FramesReceivedOK =
2191 axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2192 mac_stats->FrameCheckSequenceErrors =
2193 axienet_stat(lp, STAT_RX_FCS_ERRORS);
2194 mac_stats->AlignmentErrors =
2195 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2196 mac_stats->FramesWithDeferredXmissions =
2197 axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2198 mac_stats->LateCollisions =
2199 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2200 mac_stats->FramesAbortedDueToXSColls =
2201 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2202 mac_stats->MulticastFramesXmittedOK =
2203 axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2204 mac_stats->BroadcastFramesXmittedOK =
2205 axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2206 mac_stats->FramesWithExcessiveDeferral =
2207 axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2208 mac_stats->MulticastFramesReceivedOK =
2209 axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2210 mac_stats->BroadcastFramesReceivedOK =
2211 axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2212 mac_stats->InRangeLengthErrors =
2213 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2214 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2218 axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
2219 struct ethtool_eth_ctrl_stats *ctrl_stats)
2221 struct axienet_local *lp = netdev_priv(dev);
2224 if (!(lp->features & XAE_FEATURE_STATS))
2228 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2229 ctrl_stats->MACControlFramesTransmitted =
2230 axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2231 ctrl_stats->MACControlFramesReceived =
2232 axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2233 ctrl_stats->UnsupportedOpcodesReceived =
2234 axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2235 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2238 static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
2250 axienet_ethtool_get_rmon_stats(struct net_device *dev,
2251 struct ethtool_rmon_stats *rmon_stats,
2252 const struct ethtool_rmon_hist_range **ranges)
2254 struct axienet_local *lp = netdev_priv(dev);
2257 if (!(lp->features & XAE_FEATURE_STATS))
2261 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2262 rmon_stats->undersize_pkts =
2263 axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2264 rmon_stats->oversize_pkts =
2265 axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2266 rmon_stats->fragments =
2267 axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2269 rmon_stats->hist[0] =
2270 axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2271 rmon_stats->hist[1] =
2272 axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2273 rmon_stats->hist[2] =
2274 axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2275 rmon_stats->hist[3] =
2276 axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2277 rmon_stats->hist[4] =
2278 axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2279 rmon_stats->hist[5] =
2280 axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2281 rmon_stats->hist[6] =
2282 rmon_stats->oversize_pkts;
2284 rmon_stats->hist_tx[0] =
2285 axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2286 rmon_stats->hist_tx[1] =
2287 axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2288 rmon_stats->hist_tx[2] =
2289 axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2290 rmon_stats->hist_tx[3] =
2291 axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2292 rmon_stats->hist_tx[4] =
2293 axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2294 rmon_stats->hist_tx[5] =
2295 axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2296 rmon_stats->hist_tx[6] =
2297 axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2298 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2300 *ranges = axienet_rmon_ranges;
2303 static const struct ethtool_ops axienet_ethtool_ops = {
2304 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
2305 ETHTOOL_COALESCE_USECS,
2306 .get_drvinfo = axienet_ethtools_get_drvinfo,
2307 .get_regs_len = axienet_ethtools_get_regs_len,
2308 .get_regs = axienet_ethtools_get_regs,
2309 .get_link = ethtool_op_get_link,
2310 .get_ringparam = axienet_ethtools_get_ringparam,
2311 .set_ringparam = axienet_ethtools_set_ringparam,
2312 .get_pauseparam = axienet_ethtools_get_pauseparam,
2313 .set_pauseparam = axienet_ethtools_set_pauseparam,
2314 .get_coalesce = axienet_ethtools_get_coalesce,
2315 .set_coalesce = axienet_ethtools_set_coalesce,
2316 .get_link_ksettings = axienet_ethtools_get_link_ksettings,
2317 .set_link_ksettings = axienet_ethtools_set_link_ksettings,
2318 .nway_reset = axienet_ethtools_nway_reset,
2319 .get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
2320 .get_strings = axienet_ethtools_get_strings,
2321 .get_sset_count = axienet_ethtools_get_sset_count,
2322 .get_pause_stats = axienet_ethtools_get_pause_stats,
2323 .get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
2324 .get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
2325 .get_rmon_stats = axienet_ethtool_get_rmon_stats,
2328 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
2330 return container_of(pcs, struct axienet_local, pcs);
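/* phylink PCS callbacks: the SGMII/1000BASE-X PCS/PMA core is managed through
 * its clause 22 MDIO registers using the generic phylink_mii_c22_pcs_*
 * helpers, with lp->pcs_phy pointing at that MDIO device.
 */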
2333 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
2334 struct phylink_link_state *state)
2336 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2338 phylink_mii_c22_pcs_get_state(pcs_phy, state);
2341 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
2343 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2345 phylink_mii_c22_pcs_an_restart(pcs_phy);
2348 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
2349 phy_interface_t interface,
2350 const unsigned long *advertising,
2351 bool permit_pause_to_mac)
2353 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
2354 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
2355 struct axienet_local *lp = netdev_priv(ndev);
2358 if (lp->switch_x_sgmii) {
2359 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
2360 interface == PHY_INTERFACE_MODE_SGMII ?
2361 XLNX_MII_STD_SELECT_SGMII : 0);
2364 "Failed to switch PHY interface: %d\n",
2370 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
2373 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
2378 static const struct phylink_pcs_ops axienet_pcs_ops = {
2379 .pcs_get_state = axienet_pcs_get_state,
2380 .pcs_config = axienet_pcs_config,
2381 .pcs_an_restart = axienet_pcs_an_restart,
2384 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
2385 phy_interface_t interface)
2387 struct net_device *ndev = to_net_dev(config->dev);
2388 struct axienet_local *lp = netdev_priv(ndev);
2390 if (interface == PHY_INTERFACE_MODE_1000BASEX ||
2391 interface == PHY_INTERFACE_MODE_SGMII)
2397 static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
2398 const struct phylink_link_state *state)
2400 /* nothing meaningful to do */
2403 static void axienet_mac_link_down(struct phylink_config *config,
2405 phy_interface_t interface)
2407 /* nothing meaningful to do */
2410 static void axienet_mac_link_up(struct phylink_config *config,
2411 struct phy_device *phy,
2412 unsigned int mode, phy_interface_t interface,
2413 int speed, int duplex,
2414 bool tx_pause, bool rx_pause)
2416 struct net_device *ndev = to_net_dev(config->dev);
2417 struct axienet_local *lp = netdev_priv(ndev);
2418 u32 emmc_reg, fcc_reg;
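	/* Program the speed resolved by phylink into the EMMC register, then
	 * mirror the negotiated Tx/Rx pause settings into the FCC register.
	 */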
2420 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2421 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
2425 emmc_reg |= XAE_EMMC_LINKSPD_1000;
2428 emmc_reg |= XAE_EMMC_LINKSPD_100;
2431 emmc_reg |= XAE_EMMC_LINKSPD_10;
2435 "Speed other than 10, 100 or 1Gbps is not supported\n");
2439 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2441 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2443 fcc_reg |= XAE_FCC_FCTX_MASK;
2445 fcc_reg &= ~XAE_FCC_FCTX_MASK;
2447 fcc_reg |= XAE_FCC_FCRX_MASK;
2449 fcc_reg &= ~XAE_FCC_FCRX_MASK;
2450 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2453 static const struct phylink_mac_ops axienet_phylink_ops = {
2454 .mac_select_pcs = axienet_mac_select_pcs,
2455 .mac_config = axienet_mac_config,
2456 .mac_link_down = axienet_mac_link_down,
2457 .mac_link_up = axienet_mac_link_up,
2461 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2462 * @work: pointer to work_struct
2464 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2465 * Tx/Rx buffer descriptor rings and MAC settings before re-enabling them.
2467 static void axienet_dma_err_handler(struct work_struct *work)
2471 struct axidma_bd *cur_p;
2472 struct axienet_local *lp = container_of(work, struct axienet_local,
2474 struct net_device *ndev = lp->ndev;
2476 /* Don't bother if we are going to stop anyway */
2477 if (READ_ONCE(lp->stopping))
2480 napi_disable(&lp->napi_tx);
2481 napi_disable(&lp->napi_rx);
2483 axienet_setoptions(ndev, lp->options &
2484 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2486 axienet_dma_stop(lp);
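	/* Unmap and free any Tx buffers that were still owned by the DMA, and
	 * clear the descriptor fields so the ring restarts from a clean state.
	 */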
2488 for (i = 0; i < lp->tx_bd_num; i++) {
2489 cur_p = &lp->tx_bd_v[i];
2491 dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2493 dma_unmap_single(lp->dev, addr,
2495 XAXIDMA_BD_CTRL_LENGTH_MASK),
2499 dev_kfree_skb_irq(cur_p->skb);
2501 cur_p->phys_msb = 0;
2512 for (i = 0; i < lp->rx_bd_num; i++) {
2513 cur_p = &lp->rx_bd_v[i];
2526 axienet_dma_start(lp);
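	/* With the DMA running again, reinitialise the MAC: clear the receiver
	 * state, acknowledge any pending frame-reject interrupt, restore the
	 * interrupt enables and re-enable Rx flow control.
	 */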
2528 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2529 axienet_status &= ~XAE_RCW1_RX_MASK;
2530 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2532 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2533 if (axienet_status & XAE_INT_RXRJECT_MASK)
2534 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2535 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2536 XAE_INT_RECV_ERROR_MASK : 0);
2537 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2539 /* Sync default options with HW but leave receiver and
2540 * transmitter disabled.
2542 axienet_setoptions(ndev, lp->options &
2543 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
2544 axienet_set_mac_address(ndev, NULL);
2545 axienet_set_multicast_list(ndev);
2546 napi_enable(&lp->napi_rx);
2547 napi_enable(&lp->napi_tx);
2548 axienet_setoptions(ndev, lp->options);
2552 * axienet_probe - Axi Ethernet probe function.
2553 * @pdev: Pointer to platform device structure.
2555 * Return: 0 on success.
2556 * Non-zero error value on failure.
2558 * This is the probe routine for the Axi Ethernet driver. It is called before
2559 * any other driver routines are invoked. It allocates and sets up the Ethernet
2560 * device, parses the device tree to populate the fields of
2561 * axienet_local, and registers the Ethernet device.
2563 static int axienet_probe(struct platform_device *pdev)
2566 struct device_node *np;
2567 struct axienet_local *lp;
2568 struct net_device *ndev;
2569 struct resource *ethres;
2570 u8 mac_addr[ETH_ALEN];
2571 int addr_width = 32;
2574 ndev = alloc_etherdev(sizeof(*lp));
2578 platform_set_drvdata(pdev, ndev);
2580 SET_NETDEV_DEV(ndev, &pdev->dev);
2581 ndev->features = NETIF_F_SG;
2582 ndev->ethtool_ops = &axienet_ethtool_ops;
2584 /* MTU range: 64 - 9000 */
2585 ndev->min_mtu = 64;
2586 ndev->max_mtu = XAE_JUMBO_MTU;
2588 lp = netdev_priv(ndev);
2590 lp->dev = &pdev->dev;
2591 lp->options = XAE_OPTION_DEFAULTS;
2592 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2593 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2595 u64_stats_init(&lp->rx_stat_sync);
2596 u64_stats_init(&lp->tx_stat_sync);
2598 mutex_init(&lp->stats_lock);
2599 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2600 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2602 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2604 /* For backward compatibility, if the named AXI clock is not present,
2605 * treat the first clock specified as the AXI clock.
2607 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2609 if (IS_ERR(lp->axi_clk)) {
2610 ret = PTR_ERR(lp->axi_clk);
2613 ret = clk_prepare_enable(lp->axi_clk);
2615 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
2619 lp->misc_clks[0].id = "axis_clk";
2620 lp->misc_clks[1].id = "ref_clk";
2621 lp->misc_clks[2].id = "mgt_clk";
2623 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2627 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2631 /* Map device registers */
2632 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ðres);
2633 if (IS_ERR(lp->regs)) {
2634 ret = PTR_ERR(lp->regs);
2637 lp->regs_start = ethres->start;
2639 /* Set up checksum offload, but default to off if not specified */
2642 if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2643 lp->features |= XAE_FEATURE_STATS;
2645 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
2649 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2650 /* Can checksum any contiguous range */
2651 ndev->features |= NETIF_F_HW_CSUM;
2654 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2655 /* Can checksum TCP/UDP over IPv4. */
2656 ndev->features |= NETIF_F_IP_CSUM;
2660 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
2664 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2665 ndev->features |= NETIF_F_RXCSUM;
2668 lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2669 ndev->features |= NETIF_F_RXCSUM;
2673 /* To support jumbo frames, the Axi Ethernet hardware must be configured
2674 * with sufficiently large Rx/Tx memory; only then can the jumbo option
2675 * be enabled.
2676 * Here we read the Rx/Tx memory provisioned in the hardware from the
2677 * device tree and set the flags accordingly.
2679 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2681 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2682 "xlnx,switch-x-sgmii");
2684 /* Start with the proprietary, and broken, "xlnx,phy-type" property */
2685 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
2687 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
2689 case XAE_PHY_TYPE_MII:
2690 lp->phy_mode = PHY_INTERFACE_MODE_MII;
2692 case XAE_PHY_TYPE_GMII:
2693 lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2695 case XAE_PHY_TYPE_RGMII_2_0:
2696 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2698 case XAE_PHY_TYPE_SGMII:
2699 lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2701 case XAE_PHY_TYPE_1000BASE_X:
2702 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2709 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2713 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2714 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2715 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
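	/* DMA setup: when a "dmas" property is present, the generic dmaengine
	 * framework is used. Otherwise the driver drives the AXI DMA registers
	 * directly, locating them either through the legacy
	 * "axistream-connected" phandle or as extra resources and interrupts
	 * on the Ethernet node itself.
	 */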
2720 if (!of_property_present(pdev->dev.of_node, "dmas")) {
2721 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2722 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2725 struct resource dmares;
2727 ret = of_address_to_resource(np, 0, &dmares);
2730 "unable to get DMA resource\n");
2734 lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2736 lp->rx_irq = irq_of_parse_and_map(np, 1);
2737 lp->tx_irq = irq_of_parse_and_map(np, 0);
2739 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2741 /* Check for these resources directly on the Ethernet node. */
2742 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2743 lp->rx_irq = platform_get_irq(pdev, 1);
2744 lp->tx_irq = platform_get_irq(pdev, 0);
2745 lp->eth_irq = platform_get_irq_optional(pdev, 2);
2747 if (IS_ERR(lp->dma_regs)) {
2748 dev_err(&pdev->dev, "could not map DMA regs\n");
2749 ret = PTR_ERR(lp->dma_regs);
2752 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2753 dev_err(&pdev->dev, "could not determine irqs\n");
2758 /* Reset core now that clocks are enabled, prior to accessing MDIO */
2759 ret = __axienet_device_reset(lp);
2763 /* Autodetect the need for 64-bit DMA pointers.
2764 * When the IP is configured for a bus width bigger than 32 bits,
2765 * writing the MSB registers is mandatory, even if they are all 0.
2766 * We can detect this case by writing all 1's to one such register
2767 * and seeing if the value sticks: when the IP is configured for 32 bits
2768 * only, those registers are RES0.
2769 * Those MSB registers were introduced in IP v7.1, which we check first.
2771 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2772 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2774 iowrite32(0x0, desc);
2775 if (ioread32(desc) == 0) { /* sanity check */
2776 iowrite32(0xffffffff, desc);
2777 if (ioread32(desc) > 0) {
2778 lp->features |= XAE_FEATURE_DMA_64BIT;
2779 addr_width = 64;
2780 dev_info(&pdev->dev,
2781 "autodetected 64-bit DMA range\n");
2783 iowrite32(0x0, desc);
2786 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2787 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
2792 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2794 dev_err(&pdev->dev, "No suitable DMA available\n");
2797 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2798 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2800 struct xilinx_vdma_config cfg;
2801 struct dma_chan *tx_chan;
2803 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2804 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
2808 tx_chan = dma_request_chan(lp->dev, "tx_chan0");
2809 if (IS_ERR(tx_chan)) {
2810 ret = PTR_ERR(tx_chan);
2811 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
2816 /* Despite the "VDMA" name, this config call also supports resetting AXI DMA channels */
2817 ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
2819 dev_err(&pdev->dev, "Reset channel failed\n");
2820 dma_release_channel(tx_chan);
2824 dma_release_channel(tx_chan);
2825 lp->use_dmaengine = 1;
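		/* The Tx channel was requested here only to confirm that the
		 * DMA binding is usable and the engine can be reset; it has
		 * been released again, and use_dmaengine selects the
		 * dmaengine-based netdev ops below.
		 */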
2828 if (lp->use_dmaengine)
2829 ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
2831 ndev->netdev_ops = &axienet_netdev_ops;
2832 /* Check for Ethernet core IRQ (optional) */
2833 if (lp->eth_irq <= 0)
2834 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2836 /* Retrieve the MAC address */
2837 ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2839 axienet_set_mac_address(ndev, mac_addr);
2841 dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2843 axienet_set_mac_address(ndev, NULL);
2846 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2847 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2848 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2849 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2851 ret = axienet_mdio_setup(lp);
2853 dev_warn(&pdev->dev,
2854 "error registering MDIO bus: %d\n", ret);
2856 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2857 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2858 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2860 /* Deprecated: Always use "pcs-handle" for pcs_phy.
2861 * Falling back to "phy-handle" here is only for
2862 * backward compatibility with old device trees.
2864 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2867 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2871 lp->pcs_phy = of_mdio_find_device(np);
2873 ret = -EPROBE_DEFER;
2878 lp->pcs.ops = &axienet_pcs_ops;
2879 lp->pcs.neg_mode = true;
2880 lp->pcs.poll = true;
2883 lp->phylink_config.dev = &ndev->dev;
2884 lp->phylink_config.type = PHYLINK_NETDEV;
2885 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2886 MAC_10FD | MAC_100FD | MAC_1000FD;
2888 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
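	/* If the core can be switched between SGMII and 1000BASE-X at runtime,
	 * advertise both interfaces so phylink may select either of them.
	 */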
2889 if (lp->switch_x_sgmii) {
2890 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
2891 lp->phylink_config.supported_interfaces);
2892 __set_bit(PHY_INTERFACE_MODE_SGMII,
2893 lp->phylink_config.supported_interfaces);
2896 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2898 &axienet_phylink_ops);
2899 if (IS_ERR(lp->phylink)) {
2900 ret = PTR_ERR(lp->phylink);
2901 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2905 ret = register_netdev(lp->ndev);
2907 dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2908 goto cleanup_phylink;
2914 phylink_destroy(lp->phylink);
2918 put_device(&lp->pcs_phy->dev);
2920 axienet_mdio_teardown(lp);
2922 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2923 clk_disable_unprepare(lp->axi_clk);
2931 static void axienet_remove(struct platform_device *pdev)
2933 struct net_device *ndev = platform_get_drvdata(pdev);
2934 struct axienet_local *lp = netdev_priv(ndev);
2936 unregister_netdev(ndev);
2939 phylink_destroy(lp->phylink);
2942 put_device(&lp->pcs_phy->dev);
2944 axienet_mdio_teardown(lp);
2946 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2947 clk_disable_unprepare(lp->axi_clk);
2952 static void axienet_shutdown(struct platform_device *pdev)
2954 struct net_device *ndev = platform_get_drvdata(pdev);
2957 netif_device_detach(ndev);
2959 if (netif_running(ndev))
2965 static int axienet_suspend(struct device *dev)
2967 struct net_device *ndev = dev_get_drvdata(dev);
2969 if (!netif_running(ndev))
2972 netif_device_detach(ndev);
2981 static int axienet_resume(struct device *dev)
2983 struct net_device *ndev = dev_get_drvdata(dev);
2985 if (!netif_running(ndev))
2992 netif_device_attach(ndev);
2997 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
2998 axienet_suspend, axienet_resume);
3000 static struct platform_driver axienet_driver = {
3001 .probe = axienet_probe,
3002 .remove = axienet_remove,
3003 .shutdown = axienet_shutdown,
3005 .name = "xilinx_axienet",
3006 .pm = &axienet_pm_ops,
3007 .of_match_table = axienet_of_match,
3011 module_platform_driver(axienet_driver);
3013 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
3014 MODULE_AUTHOR("Xilinx");
3015 MODULE_LICENSE("GPL");