1 // SPDX-License-Identifier: GPL-2.0-only
3 * Xilinx Axi Ethernet device driver
5 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
9 * Copyright (c) 2010 - 2011 PetaLogix
10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
17 * - Add Axi Fifo support.
18 * - Factor out Axi DMA code into separate driver.
19 * - Test and fix basic multicast filtering.
20 * - Add support for extended multicast filtering.
21 * - Test basic VLAN support.
22 * - Add support for extended VLAN support.
25 #include <linux/clk.h>
26 #include <linux/delay.h>
27 #include <linux/etherdevice.h>
28 #include <linux/module.h>
29 #include <linux/netdevice.h>
31 #include <linux/of_mdio.h>
32 #include <linux/of_net.h>
33 #include <linux/of_irq.h>
34 #include <linux/of_address.h>
35 #include <linux/platform_device.h>
36 #include <linux/skbuff.h>
37 #include <linux/math64.h>
38 #include <linux/phy.h>
39 #include <linux/mii.h>
40 #include <linux/ethtool.h>
41 #include <linux/dmaengine.h>
42 #include <linux/dma-mapping.h>
43 #include <linux/dma/xilinx_dma.h>
44 #include <linux/circ_buf.h>
45 #include <net/netdev_queues.h>
47 #include "xilinx_axienet.h"
49 /* Descriptors defines for Tx and Rx DMA */
50 #define TX_BD_NUM_DEFAULT 128
51 #define RX_BD_NUM_DEFAULT 1024
52 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
53 #define TX_BD_NUM_MAX 4096
54 #define RX_BD_NUM_MAX 4096
55 #define DMA_NUM_APP_WORDS 5
57 #define RX_BUF_NUM_DEFAULT 128
59 /* Must be shorter than length of ethtool_drvinfo.driver field to fit */
60 #define DRIVER_NAME "xaxienet"
61 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
62 #define DRIVER_VERSION "1.00a"
64 #define AXIENET_REGS_N 40
66 static void axienet_rx_submit_desc(struct net_device *ndev);
68 /* Match table for of_platform binding */
69 static const struct of_device_id axienet_of_match[] = {
70 { .compatible = "xlnx,axi-ethernet-1.00.a", },
71 { .compatible = "xlnx,axi-ethernet-1.01.a", },
72 { .compatible = "xlnx,axi-ethernet-2.01.a", },
76 MODULE_DEVICE_TABLE(of, axienet_of_match);
78 /* Option table for setting up Axi Ethernet hardware options */
79 static struct axienet_option axienet_options[] = {
80 /* Turn on jumbo packet support for both Rx and Tx */
82 .opt = XAE_OPTION_JUMBO,
84 .m_or = XAE_TC_JUM_MASK,
86 .opt = XAE_OPTION_JUMBO,
87 .reg = XAE_RCW1_OFFSET,
88 .m_or = XAE_RCW1_JUM_MASK,
89 }, { /* Turn on VLAN packet support for both Rx and Tx */
90 .opt = XAE_OPTION_VLAN,
92 .m_or = XAE_TC_VLAN_MASK,
94 .opt = XAE_OPTION_VLAN,
95 .reg = XAE_RCW1_OFFSET,
96 .m_or = XAE_RCW1_VLAN_MASK,
97 }, { /* Turn on FCS stripping on receive packets */
98 .opt = XAE_OPTION_FCS_STRIP,
99 .reg = XAE_RCW1_OFFSET,
100 .m_or = XAE_RCW1_FCS_MASK,
101 }, { /* Turn on FCS insertion on transmit packets */
102 .opt = XAE_OPTION_FCS_INSERT,
103 .reg = XAE_TC_OFFSET,
104 .m_or = XAE_TC_FCS_MASK,
105 }, { /* Turn off length/type field checking on receive packets */
106 .opt = XAE_OPTION_LENTYPE_ERR,
107 .reg = XAE_RCW1_OFFSET,
108 .m_or = XAE_RCW1_LT_DIS_MASK,
109 }, { /* Turn on Rx flow control */
110 .opt = XAE_OPTION_FLOW_CONTROL,
111 .reg = XAE_FCC_OFFSET,
112 .m_or = XAE_FCC_FCRX_MASK,
113 }, { /* Turn on Tx flow control */
114 .opt = XAE_OPTION_FLOW_CONTROL,
115 .reg = XAE_FCC_OFFSET,
116 .m_or = XAE_FCC_FCTX_MASK,
117 }, { /* Turn on promiscuous frame filtering */
118 .opt = XAE_OPTION_PROMISC,
119 .reg = XAE_FMI_OFFSET,
120 .m_or = XAE_FMI_PM_MASK,
121 }, { /* Enable transmitter */
122 .opt = XAE_OPTION_TXEN,
123 .reg = XAE_TC_OFFSET,
124 .m_or = XAE_TC_TX_MASK,
125 }, { /* Enable receiver */
126 .opt = XAE_OPTION_RXEN,
127 .reg = XAE_RCW1_OFFSET,
128 .m_or = XAE_RCW1_RX_MASK,
133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
144 * axienet_dma_in32 - Memory mapped Axi DMA register read
145 * @lp: Pointer to axienet local structure
146 * @reg: Address offset from the base address of the Axi DMA core
148 * Return: The contents of the Axi DMA register
150 * This function returns the contents of the corresponding Axi DMA register.
152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
154 return ioread32(lp->dma_regs + reg);
157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
158 struct axidma_bd *desc)
160 desc->phys = lower_32_bits(addr);
161 if (lp->features & XAE_FEATURE_DMA_64BIT)
162 desc->phys_msb = upper_32_bits(addr);
165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
166 struct axidma_bd *desc)
168 dma_addr_t ret = desc->phys;
170 if (lp->features & XAE_FEATURE_DMA_64BIT)
171 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
177 * axienet_dma_bd_release - Release buffer descriptor rings
178 * @ndev: Pointer to the net_device structure
180 * This function is used to release the descriptors allocated in
181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
182 * driver stop api is called.
184 static void axienet_dma_bd_release(struct net_device *ndev)
187 struct axienet_local *lp = netdev_priv(ndev);
189 /* If we end up here, tx_bd_v must have been DMA allocated. */
190 dma_free_coherent(lp->dev,
191 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
198 for (i = 0; i < lp->rx_bd_num; i++) {
201 /* A NULL skb means this descriptor has not been initialised
204 if (!lp->rx_bd_v[i].skb)
207 dev_kfree_skb(lp->rx_bd_v[i].skb);
209 /* For each descriptor, we programmed cntrl with the (non-zero)
210 * descriptor size, after it had been successfully allocated.
211 * So a non-zero value in there means we need to unmap it.
213 if (lp->rx_bd_v[i].cntrl) {
214 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
215 dma_unmap_single(lp->dev, phys,
216 lp->max_frm_size, DMA_FROM_DEVICE);
220 dma_free_coherent(lp->dev,
221 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
227 * axienet_usec_to_timer - Calculate IRQ delay timer value
228 * @lp: Pointer to the axienet_local structure
229 * @coalesce_usec: Microseconds to convert into timer value
231 static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
234 u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */
237 clk_rate = clk_get_rate(lp->axi_clk);
239 /* 1 Timeout Interval = 125 * (clock period of SG clock) */
240 result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
241 XAXIDMA_DELAY_SCALE);
242 return min(result, FIELD_MAX(XAXIDMA_DELAY_MASK));
246 * axienet_dma_start - Set up DMA registers and start DMA operation
247 * @lp: Pointer to the axienet_local structure
249 static void axienet_dma_start(struct axienet_local *lp)
251 /* Start updating the Rx channel control register */
252 lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
253 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
254 /* Only set interrupt delay timer if not generating an interrupt on
255 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
257 if (lp->coalesce_count_rx > 1)
258 lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
259 << XAXIDMA_DELAY_SHIFT) |
260 XAXIDMA_IRQ_DELAY_MASK;
261 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
263 /* Start updating the Tx channel control register */
264 lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
265 XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
266 /* Only set interrupt delay timer if not generating an interrupt on
267 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
269 if (lp->coalesce_count_tx > 1)
270 lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
271 << XAXIDMA_DELAY_SHIFT) |
272 XAXIDMA_IRQ_DELAY_MASK;
273 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
275 /* Populate the tail pointer and bring the Rx Axi DMA engine out of
276 * halted state. This will make the Rx side ready for reception.
278 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
279 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
280 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
281 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
282 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
284 /* Write to the RS (Run-stop) bit in the Tx channel control register.
285 * Tx channel is now ready to run. But only after we write to the
286 * tail pointer register that the Tx channel will start transmitting.
288 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
289 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
290 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
294 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
295 * @ndev: Pointer to the net_device structure
297 * Return: 0, on success -ENOMEM, on failure
299 * This function is called to initialize the Rx and Tx DMA descriptor
300 * rings. This initializes the descriptors with required default values
301 * and is called when Axi Ethernet driver reset is called.
303 static int axienet_dma_bd_init(struct net_device *ndev)
307 struct axienet_local *lp = netdev_priv(ndev);
309 /* Reset the indexes which are used for accessing the BDs */
314 /* Allocate the Tx and Rx buffer descriptors. */
315 lp->tx_bd_v = dma_alloc_coherent(lp->dev,
316 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
317 &lp->tx_bd_p, GFP_KERNEL);
321 lp->rx_bd_v = dma_alloc_coherent(lp->dev,
322 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
323 &lp->rx_bd_p, GFP_KERNEL);
327 for (i = 0; i < lp->tx_bd_num; i++) {
328 dma_addr_t addr = lp->tx_bd_p +
329 sizeof(*lp->tx_bd_v) *
330 ((i + 1) % lp->tx_bd_num);
332 lp->tx_bd_v[i].next = lower_32_bits(addr);
333 if (lp->features & XAE_FEATURE_DMA_64BIT)
334 lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
337 for (i = 0; i < lp->rx_bd_num; i++) {
340 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
341 ((i + 1) % lp->rx_bd_num);
342 lp->rx_bd_v[i].next = lower_32_bits(addr);
343 if (lp->features & XAE_FEATURE_DMA_64BIT)
344 lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
346 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
350 lp->rx_bd_v[i].skb = skb;
351 addr = dma_map_single(lp->dev, skb->data,
352 lp->max_frm_size, DMA_FROM_DEVICE);
353 if (dma_mapping_error(lp->dev, addr)) {
354 netdev_err(ndev, "DMA mapping error\n");
357 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
359 lp->rx_bd_v[i].cntrl = lp->max_frm_size;
362 axienet_dma_start(lp);
366 axienet_dma_bd_release(ndev);
371 * axienet_set_mac_address - Write the MAC address
372 * @ndev: Pointer to the net_device structure
373 * @address: 6 byte Address to be written as MAC address
375 * This function is called to initialize the MAC address of the Axi Ethernet
376 * core. It writes to the UAW0 and UAW1 registers of the core.
378 static void axienet_set_mac_address(struct net_device *ndev,
381 struct axienet_local *lp = netdev_priv(ndev);
384 eth_hw_addr_set(ndev, address);
385 if (!is_valid_ether_addr(ndev->dev_addr))
386 eth_hw_addr_random(ndev);
388 /* Set up unicast MAC address filter set its mac address */
389 axienet_iow(lp, XAE_UAW0_OFFSET,
390 (ndev->dev_addr[0]) |
391 (ndev->dev_addr[1] << 8) |
392 (ndev->dev_addr[2] << 16) |
393 (ndev->dev_addr[3] << 24));
394 axienet_iow(lp, XAE_UAW1_OFFSET,
395 (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
396 ~XAE_UAW1_UNICASTADDR_MASK) |
398 (ndev->dev_addr[5] << 8))));
402 * netdev_set_mac_address - Write the MAC address (from outside the driver)
403 * @ndev: Pointer to the net_device structure
404 * @p: 6 byte Address to be written as MAC address
406 * Return: 0 for all conditions. Presently, there is no failure case.
408 * This function is called to initialize the MAC address of the Axi Ethernet
409 * core. It calls the core specific axienet_set_mac_address. This is the
410 * function that goes into net_device_ops structure entry ndo_set_mac_address.
412 static int netdev_set_mac_address(struct net_device *ndev, void *p)
414 struct sockaddr *addr = p;
416 axienet_set_mac_address(ndev, addr->sa_data);
421 * axienet_set_multicast_list - Prepare the multicast table
422 * @ndev: Pointer to the net_device structure
424 * This function is called to initialize the multicast table during
425 * initialization. The Axi Ethernet basic multicast support has a four-entry
426 * multicast table which is initialized here. Additionally this function
427 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
428 * means whenever the multicast table entries need to be updated this
429 * function gets called.
431 static void axienet_set_multicast_list(struct net_device *ndev)
434 u32 reg, af0reg, af1reg;
435 struct axienet_local *lp = netdev_priv(ndev);
437 reg = axienet_ior(lp, XAE_FMI_OFFSET);
438 reg &= ~XAE_FMI_PM_MASK;
439 if (ndev->flags & IFF_PROMISC)
440 reg |= XAE_FMI_PM_MASK;
442 reg &= ~XAE_FMI_PM_MASK;
443 axienet_iow(lp, XAE_FMI_OFFSET, reg);
445 if (ndev->flags & IFF_ALLMULTI ||
446 netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
448 axienet_iow(lp, XAE_FMI_OFFSET, reg);
449 axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
450 axienet_iow(lp, XAE_AF1_OFFSET, 0);
451 axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
452 axienet_iow(lp, XAE_AM1_OFFSET, 0);
453 axienet_iow(lp, XAE_FFE_OFFSET, 1);
455 } else if (!netdev_mc_empty(ndev)) {
456 struct netdev_hw_addr *ha;
458 netdev_for_each_mc_addr(ha, ndev) {
459 if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
462 af0reg = (ha->addr[0]);
463 af0reg |= (ha->addr[1] << 8);
464 af0reg |= (ha->addr[2] << 16);
465 af0reg |= (ha->addr[3] << 24);
467 af1reg = (ha->addr[4]);
468 af1reg |= (ha->addr[5] << 8);
473 axienet_iow(lp, XAE_FMI_OFFSET, reg);
474 axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
475 axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
476 axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
477 axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
478 axienet_iow(lp, XAE_FFE_OFFSET, 1);
483 for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
486 axienet_iow(lp, XAE_FMI_OFFSET, reg);
487 axienet_iow(lp, XAE_FFE_OFFSET, 0);
492 * axienet_setoptions - Set an Axi Ethernet option
493 * @ndev: Pointer to the net_device structure
494 * @options: Option to be enabled/disabled
496 * The Axi Ethernet core has multiple features which can be selectively turned
497 * on or off. The typical options could be jumbo frame option, basic VLAN
498 * option, promiscuous mode option etc. This function is used to set or clear
499 * these options in the Axi Ethernet hardware. This is done through
500 * axienet_option structure .
502 static void axienet_setoptions(struct net_device *ndev, u32 options)
505 struct axienet_local *lp = netdev_priv(ndev);
506 struct axienet_option *tp = &axienet_options[0];
509 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
510 if (options & tp->opt)
512 axienet_iow(lp, tp->reg, reg);
516 lp->options |= options;
519 static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
523 if (lp->reset_in_progress)
524 return lp->hw_stat_base[stat];
526 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
527 return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
530 static void axienet_stats_update(struct axienet_local *lp, bool reset)
532 enum temac_stat stat;
534 write_seqcount_begin(&lp->hw_stats_seqcount);
535 lp->reset_in_progress = reset;
536 for (stat = 0; stat < STAT_COUNT; stat++) {
537 u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
539 lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
540 lp->hw_last_counter[stat] = counter;
542 write_seqcount_end(&lp->hw_stats_seqcount);
545 static void axienet_refresh_stats(struct work_struct *work)
547 struct axienet_local *lp = container_of(work, struct axienet_local,
550 mutex_lock(&lp->stats_lock);
551 axienet_stats_update(lp, false);
552 mutex_unlock(&lp->stats_lock);
554 /* Just less than 2^32 bytes at 2.5 GBit/s */
555 schedule_delayed_work(&lp->stats_work, 13 * HZ);
558 static int __axienet_device_reset(struct axienet_local *lp)
563 /* Save statistics counters in case they will be reset */
564 mutex_lock(&lp->stats_lock);
565 if (lp->features & XAE_FEATURE_STATS)
566 axienet_stats_update(lp, true);
568 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
569 * process of Axi DMA takes a while to complete as all pending
570 * commands/transfers will be flushed or completed during this
572 * Note that even though both TX and RX have their own reset register,
573 * they both reset the entire DMA core, so only one needs to be used.
575 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
576 ret = read_poll_timeout(axienet_dma_in32, value,
577 !(value & XAXIDMA_CR_RESET_MASK),
578 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
579 XAXIDMA_TX_CR_OFFSET);
581 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
585 /* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
586 ret = read_poll_timeout(axienet_ior, value,
587 value & XAE_INT_PHYRSTCMPLT_MASK,
588 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
591 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
595 /* Update statistics counters with new values */
596 if (lp->features & XAE_FEATURE_STATS) {
597 enum temac_stat stat;
599 write_seqcount_begin(&lp->hw_stats_seqcount);
600 lp->reset_in_progress = false;
601 for (stat = 0; stat < STAT_COUNT; stat++) {
603 axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
605 lp->hw_stat_base[stat] +=
606 lp->hw_last_counter[stat] - counter;
607 lp->hw_last_counter[stat] = counter;
609 write_seqcount_end(&lp->hw_stats_seqcount);
613 mutex_unlock(&lp->stats_lock);
618 * axienet_dma_stop - Stop DMA operation
619 * @lp: Pointer to the axienet_local structure
621 static void axienet_dma_stop(struct axienet_local *lp)
626 cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
627 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
628 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
629 synchronize_irq(lp->rx_irq);
631 cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
632 cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
633 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
634 synchronize_irq(lp->tx_irq);
636 /* Give DMAs a chance to halt gracefully */
637 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
638 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
640 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
643 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
644 for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
646 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
649 /* Do a reset to ensure DMA is really stopped */
650 axienet_lock_mii(lp);
651 __axienet_device_reset(lp);
652 axienet_unlock_mii(lp);
656 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
657 * @ndev: Pointer to the net_device structure
659 * This function is called to reset and initialize the Axi Ethernet core. This
660 * is typically called during initialization. It does a reset of the Axi DMA
661 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
662 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
663 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
665 * Returns 0 on success or a negative error number otherwise.
667 static int axienet_device_reset(struct net_device *ndev)
670 struct axienet_local *lp = netdev_priv(ndev);
673 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
674 lp->options |= XAE_OPTION_VLAN;
675 lp->options &= (~XAE_OPTION_JUMBO);
677 if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) {
678 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
681 if (lp->max_frm_size <= lp->rxmem)
682 lp->options |= XAE_OPTION_JUMBO;
685 if (!lp->use_dmaengine) {
686 ret = __axienet_device_reset(lp);
690 ret = axienet_dma_bd_init(ndev);
692 netdev_err(ndev, "%s: descriptor allocation failed\n",
698 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
699 axienet_status &= ~XAE_RCW1_RX_MASK;
700 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
702 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
703 if (axienet_status & XAE_INT_RXRJECT_MASK)
704 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
705 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
706 XAE_INT_RECV_ERROR_MASK : 0);
708 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
710 /* Sync default options with HW but leave receiver and
711 * transmitter disabled.
713 axienet_setoptions(ndev, lp->options &
714 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
715 axienet_set_mac_address(ndev, NULL);
716 axienet_set_multicast_list(ndev);
717 axienet_setoptions(ndev, lp->options);
719 netif_trans_update(ndev);
725 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
726 * @lp: Pointer to the axienet_local structure
727 * @first_bd: Index of first descriptor to clean up
728 * @nr_bds: Max number of descriptors to clean up
729 * @force: Whether to clean descriptors even if not complete
730 * @sizep: Pointer to a u32 filled with the total sum of all bytes
731 * in all cleaned-up descriptors. Ignored if NULL.
732 * @budget: NAPI budget (use 0 when not called from NAPI poll)
734 * Would either be called after a successful transmit operation, or after
735 * there was an error when setting up the chain.
736 * Returns the number of packets handled.
738 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
739 int nr_bds, bool force, u32 *sizep, int budget)
741 struct axidma_bd *cur_p;
746 for (i = 0; i < nr_bds; i++) {
747 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
748 status = cur_p->status;
750 /* If force is not specified, clean up only descriptors
751 * that have been completed by the MAC.
753 if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
756 /* Ensure we see complete descriptor update */
758 phys = desc_get_phys_addr(lp, cur_p);
759 dma_unmap_single(lp->dev, phys,
760 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
763 if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
764 napi_consume_skb(cur_p->skb, budget);
773 /* ensure our transmit path and device don't prematurely see status cleared */
779 *sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
784 if (lp->tx_bd_ci >= lp->tx_bd_num)
785 lp->tx_bd_ci %= lp->tx_bd_num;
792 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
793 * @lp: Pointer to the axienet_local structure
794 * @num_frag: The number of BDs to check for
796 * Return: 0, on success
797 * NETDEV_TX_BUSY, if any of the descriptors are not free
799 * This function is invoked before BDs are allocated and transmission starts.
800 * This function returns 0 if a BD or group of BDs can be allocated for
801 * transmission. If the BD or any of the BDs are not free the function
802 * returns a busy status.
804 static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
807 struct axidma_bd *cur_p;
809 /* Ensure we see all descriptor updates from device or TX polling */
811 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
814 return NETDEV_TX_BUSY;
819 * axienet_dma_tx_cb - DMA engine callback for TX channel.
820 * @data: Pointer to the axienet_local structure.
821 * @result: error reporting through dmaengine_result.
822 * This function is called by dmaengine driver for TX channel to notify
823 * that the transmit is done.
825 static void axienet_dma_tx_cb(void *data, const struct dmaengine_result *result)
827 struct skbuf_dma_descriptor *skbuf_dma;
828 struct axienet_local *lp = data;
829 struct netdev_queue *txq;
832 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
833 len = skbuf_dma->skb->len;
834 txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
835 u64_stats_update_begin(&lp->tx_stat_sync);
836 u64_stats_add(&lp->tx_bytes, len);
837 u64_stats_add(&lp->tx_packets, 1);
838 u64_stats_update_end(&lp->tx_stat_sync);
839 dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
840 dev_consume_skb_any(skbuf_dma->skb);
841 netif_txq_completed_wake(txq, 1, len,
842 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
847 * axienet_start_xmit_dmaengine - Starts the transmission.
848 * @skb: sk_buff pointer that contains data to be Txed.
849 * @ndev: Pointer to net_device structure.
851 * Return: NETDEV_TX_OK on success or any non space errors.
852 * NETDEV_TX_BUSY when free element in TX skb ring buffer
855 * This function is invoked to initiate transmission. The
856 * function sets the skbs, register dma callback API and submit
857 * the dma transaction.
858 * Additionally if checksum offloading is supported,
859 * it populates AXI Stream Control fields with appropriate values.
862 axienet_start_xmit_dmaengine(struct sk_buff *skb, struct net_device *ndev)
864 struct dma_async_tx_descriptor *dma_tx_desc = NULL;
865 struct axienet_local *lp = netdev_priv(ndev);
866 u32 app_metadata[DMA_NUM_APP_WORDS] = {0};
867 struct skbuf_dma_descriptor *skbuf_dma;
868 struct dma_device *dma_dev;
869 struct netdev_queue *txq;
875 dma_dev = lp->tx_chan->device;
876 sg_len = skb_shinfo(skb)->nr_frags + 1;
877 if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) {
878 netif_stop_queue(ndev);
880 netdev_warn(ndev, "TX ring unexpectedly full\n");
881 return NETDEV_TX_BUSY;
884 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
886 goto xmit_error_drop_skb;
889 sg_init_table(skbuf_dma->sgl, sg_len);
890 ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len);
892 goto xmit_error_drop_skb;
894 ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
896 goto xmit_error_drop_skb;
898 /* Fill up app fields for checksum */
899 if (skb->ip_summed == CHECKSUM_PARTIAL) {
900 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
901 /* Tx Full Checksum Offload Enabled */
902 app_metadata[0] |= 2;
903 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
904 csum_start_off = skb_transport_offset(skb);
905 csum_index_off = csum_start_off + skb->csum_offset;
906 /* Tx Partial Checksum Offload Enabled */
907 app_metadata[0] |= 1;
908 app_metadata[1] = (csum_start_off << 16) | csum_index_off;
910 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
911 app_metadata[0] |= 2; /* Tx Full Checksum Offload Enabled */
914 dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
915 sg_len, DMA_MEM_TO_DEV,
916 DMA_PREP_INTERRUPT, (void *)app_metadata);
918 goto xmit_error_unmap_sg;
920 skbuf_dma->skb = skb;
921 skbuf_dma->sg_len = sg_len;
922 dma_tx_desc->callback_param = lp;
923 dma_tx_desc->callback_result = axienet_dma_tx_cb;
924 txq = skb_get_tx_queue(lp->ndev, skb);
925 netdev_tx_sent_queue(txq, skb->len);
926 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
927 MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS);
929 dmaengine_submit(dma_tx_desc);
930 dma_async_issue_pending(lp->tx_chan);
934 dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
936 dev_kfree_skb_any(skb);
941 * axienet_tx_poll - Invoked once a transmit is completed by the
942 * Axi DMA Tx channel.
943 * @napi: Pointer to NAPI structure.
944 * @budget: Max number of TX packets to process.
946 * Return: Number of TX packets processed.
948 * This function is invoked from the NAPI processing to notify the completion
949 * of transmit operation. It clears fields in the corresponding Tx BDs and
950 * unmaps the corresponding buffer so that CPU can regain ownership of the
951 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
954 static int axienet_tx_poll(struct napi_struct *napi, int budget)
956 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
957 struct net_device *ndev = lp->ndev;
961 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
965 u64_stats_update_begin(&lp->tx_stat_sync);
966 u64_stats_add(&lp->tx_packets, packets);
967 u64_stats_add(&lp->tx_bytes, size);
968 u64_stats_update_end(&lp->tx_stat_sync);
970 /* Matches barrier in axienet_start_xmit */
973 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
974 netif_wake_queue(ndev);
977 if (packets < budget && napi_complete_done(napi, packets)) {
978 /* Re-enable TX completion interrupts. This should
979 * cause an immediate interrupt if any TX packets are
982 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
988 * axienet_start_xmit - Starts the transmission.
989 * @skb: sk_buff pointer that contains data to be Txed.
990 * @ndev: Pointer to net_device structure.
992 * Return: NETDEV_TX_OK, on success
993 * NETDEV_TX_BUSY, if any of the descriptors are not free
995 * This function is invoked from upper layers to initiate transmission. The
996 * function uses the next available free BDs and populates their fields to
997 * start the transmission. Additionally if checksum offloading is supported,
998 * it populates AXI Stream Control fields with appropriate values.
1001 axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1008 dma_addr_t tail_p, phys;
1009 u32 orig_tail_ptr, new_tail_ptr;
1010 struct axienet_local *lp = netdev_priv(ndev);
1011 struct axidma_bd *cur_p;
1013 orig_tail_ptr = lp->tx_bd_tail;
1014 new_tail_ptr = orig_tail_ptr;
1016 num_frag = skb_shinfo(skb)->nr_frags;
1017 cur_p = &lp->tx_bd_v[orig_tail_ptr];
1019 if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1020 /* Should not happen as last start_xmit call should have
1021 * checked for sufficient space and queue should only be
1022 * woken when sufficient space is available.
1024 netif_stop_queue(ndev);
1025 if (net_ratelimit())
1026 netdev_warn(ndev, "TX ring unexpectedly full\n");
1027 return NETDEV_TX_BUSY;
1030 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1031 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1032 /* Tx Full Checksum Offload Enabled */
1034 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1035 csum_start_off = skb_transport_offset(skb);
1036 csum_index_off = csum_start_off + skb->csum_offset;
1037 /* Tx Partial Checksum Offload Enabled */
1039 cur_p->app1 = (csum_start_off << 16) | csum_index_off;
1041 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1042 cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
1045 phys = dma_map_single(lp->dev, skb->data,
1046 skb_headlen(skb), DMA_TO_DEVICE);
1047 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1048 if (net_ratelimit())
1049 netdev_err(ndev, "TX DMA mapping error\n");
1050 ndev->stats.tx_dropped++;
1051 dev_kfree_skb_any(skb);
1052 return NETDEV_TX_OK;
1054 desc_set_phys_addr(lp, phys, cur_p);
1055 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;
1057 for (ii = 0; ii < num_frag; ii++) {
1058 if (++new_tail_ptr >= lp->tx_bd_num)
1060 cur_p = &lp->tx_bd_v[new_tail_ptr];
1061 frag = &skb_shinfo(skb)->frags[ii];
1062 phys = dma_map_single(lp->dev,
1063 skb_frag_address(frag),
1064 skb_frag_size(frag),
1066 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1067 if (net_ratelimit())
1068 netdev_err(ndev, "TX DMA mapping error\n");
1069 ndev->stats.tx_dropped++;
1070 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1072 dev_kfree_skb_any(skb);
1073 return NETDEV_TX_OK;
1075 desc_set_phys_addr(lp, phys, cur_p);
1076 cur_p->cntrl = skb_frag_size(frag);
1079 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
1082 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1083 if (++new_tail_ptr >= lp->tx_bd_num)
1085 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1087 /* Start the transfer */
1088 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1090 /* Stop queue if next transmit may not have space */
1091 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1092 netif_stop_queue(ndev);
1094 /* Matches barrier in axienet_tx_poll */
1097 /* Space might have just been freed - check again */
1098 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1099 netif_wake_queue(ndev);
1102 return NETDEV_TX_OK;
1106 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1107 * @data: Pointer to the skbuf_dma_descriptor structure.
1108 * @result: error reporting through dmaengine_result.
1109 * This function is called by dmaengine driver for RX channel to notify
1110 * that the packet is received.
1112 static void axienet_dma_rx_cb(void *data, const struct dmaengine_result *result)
1114 struct skbuf_dma_descriptor *skbuf_dma;
1115 size_t meta_len, meta_max_len, rx_len;
1116 struct axienet_local *lp = data;
1117 struct sk_buff *skb;
1120 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1121 skb = skbuf_dma->skb;
1122 app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len,
1124 dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1126 /* TODO: Derive app word index programmatically */
1127 rx_len = (app_metadata[LEN_APP] & 0xFFFF);
1128 skb_put(skb, rx_len);
1129 skb->protocol = eth_type_trans(skb, lp->ndev);
1130 skb->ip_summed = CHECKSUM_NONE;
1133 u64_stats_update_begin(&lp->rx_stat_sync);
1134 u64_stats_add(&lp->rx_packets, 1);
1135 u64_stats_add(&lp->rx_bytes, rx_len);
1136 u64_stats_update_end(&lp->rx_stat_sync);
1137 axienet_rx_submit_desc(lp->ndev);
1138 dma_async_issue_pending(lp->rx_chan);
1142 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1143 * @napi: Pointer to NAPI structure.
1144 * @budget: Max number of RX packets to process.
1146 * Return: Number of RX packets processed.
1148 static int axienet_rx_poll(struct napi_struct *napi, int budget)
1154 dma_addr_t tail_p = 0;
1155 struct axidma_bd *cur_p;
1156 struct sk_buff *skb, *new_skb;
1157 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1159 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1161 while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
1164 /* Ensure we see complete descriptor update */
1170 /* skb could be NULL if a previous pass already received the
1171 * packet for this slot in the ring, but failed to refill it
1172 * with a newly allocated buffer. In this case, don't try to
1176 length = cur_p->app4 & 0x0000FFFF;
1178 phys = desc_get_phys_addr(lp, cur_p);
1179 dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1182 skb_put(skb, length);
1183 skb->protocol = eth_type_trans(skb, lp->ndev);
1184 /*skb_checksum_none_assert(skb);*/
1185 skb->ip_summed = CHECKSUM_NONE;
1187 /* if we're doing Rx csum offload, set it up */
1188 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1189 csumstatus = (cur_p->app2 &
1190 XAE_FULL_CSUM_STATUS_MASK) >> 3;
1191 if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
1192 csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
1193 skb->ip_summed = CHECKSUM_UNNECESSARY;
1195 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1196 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
1197 skb->ip_summed = CHECKSUM_COMPLETE;
1200 napi_gro_receive(napi, skb);
1206 new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1210 phys = dma_map_single(lp->dev, new_skb->data,
1213 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1214 if (net_ratelimit())
1215 netdev_err(lp->ndev, "RX DMA mapping error\n");
1216 dev_kfree_skb(new_skb);
1219 desc_set_phys_addr(lp, phys, cur_p);
1221 cur_p->cntrl = lp->max_frm_size;
1223 cur_p->skb = new_skb;
1225 /* Only update tail_p to mark this slot as usable after it has
1226 * been successfully refilled.
1228 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1230 if (++lp->rx_bd_ci >= lp->rx_bd_num)
1232 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1235 u64_stats_update_begin(&lp->rx_stat_sync);
1236 u64_stats_add(&lp->rx_packets, packets);
1237 u64_stats_add(&lp->rx_bytes, size);
1238 u64_stats_update_end(&lp->rx_stat_sync);
1241 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1243 if (packets < budget && napi_complete_done(napi, packets)) {
1244 /* Re-enable RX completion interrupts. This should
1245 * cause an immediate interrupt if any RX packets are
1248 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1254 * axienet_tx_irq - Tx Done Isr.
1256 * @_ndev: net_device pointer
1258 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
1260 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1263 static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
1265 unsigned int status;
1266 struct net_device *ndev = _ndev;
1267 struct axienet_local *lp = netdev_priv(ndev);
1269 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1271 if (!(status & XAXIDMA_IRQ_ALL_MASK))
1274 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1276 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1277 netdev_err(ndev, "DMA Tx error 0x%x\n", status);
1278 netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1279 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1280 (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1281 schedule_work(&lp->dma_err_task);
1283 /* Disable further TX completion interrupts and schedule
1284 * NAPI to handle the completions.
1286 u32 cr = lp->tx_dma_cr;
1288 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1289 if (napi_schedule_prep(&lp->napi_tx)) {
1290 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1291 __napi_schedule(&lp->napi_tx);
1299 * axienet_rx_irq - Rx Isr.
1301 * @_ndev: net_device pointer
1303 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
1305 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1308 static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
1310 unsigned int status;
1311 struct net_device *ndev = _ndev;
1312 struct axienet_local *lp = netdev_priv(ndev);
1314 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1316 if (!(status & XAXIDMA_IRQ_ALL_MASK))
1319 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1321 if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
1322 netdev_err(ndev, "DMA Rx error 0x%x\n", status);
1323 netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
1324 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1325 (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1326 schedule_work(&lp->dma_err_task);
1328 /* Disable further RX completion interrupts and schedule
1331 u32 cr = lp->rx_dma_cr;
1333 cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
1334 if (napi_schedule_prep(&lp->napi_rx)) {
1335 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1336 __napi_schedule(&lp->napi_rx);
1344 * axienet_eth_irq - Ethernet core Isr.
1346 * @_ndev: net_device pointer
1348 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
1350 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
1352 static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
1354 struct net_device *ndev = _ndev;
1355 struct axienet_local *lp = netdev_priv(ndev);
1356 unsigned int pending;
1358 pending = axienet_ior(lp, XAE_IP_OFFSET);
1362 if (pending & XAE_INT_RXFIFOOVR_MASK)
1363 ndev->stats.rx_missed_errors++;
1365 if (pending & XAE_INT_RXRJECT_MASK)
1366 ndev->stats.rx_dropped++;
1368 axienet_iow(lp, XAE_IS_OFFSET, pending);
1372 static void axienet_dma_err_handler(struct work_struct *work);
1375 * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine.
1376 * allocate skbuff, map the scatterlist and obtain a descriptor
1377 * and then add the callback information and submit descriptor.
1379 * @ndev: net_device pointer
1382 static void axienet_rx_submit_desc(struct net_device *ndev)
1384 struct dma_async_tx_descriptor *dma_rx_desc = NULL;
1385 struct axienet_local *lp = netdev_priv(ndev);
1386 struct skbuf_dma_descriptor *skbuf_dma;
1387 struct sk_buff *skb;
1390 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1395 skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1399 sg_init_table(skbuf_dma->sgl, 1);
1400 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1401 if (unlikely(dma_mapping_error(lp->dev, addr))) {
1402 if (net_ratelimit())
1403 netdev_err(ndev, "DMA mapping error\n");
1404 goto rx_submit_err_free_skb;
1406 sg_dma_address(skbuf_dma->sgl) = addr;
1407 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1408 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1410 DMA_PREP_INTERRUPT);
1412 goto rx_submit_err_unmap_skb;
1414 skbuf_dma->skb = skb;
1415 skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl);
1416 skbuf_dma->desc = dma_rx_desc;
1417 dma_rx_desc->callback_param = lp;
1418 dma_rx_desc->callback_result = axienet_dma_rx_cb;
1419 dmaengine_submit(dma_rx_desc);
1423 rx_submit_err_unmap_skb:
1424 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1425 rx_submit_err_free_skb:
1430 * axienet_init_dmaengine - init the dmaengine code.
1431 * @ndev: Pointer to net_device structure
1433 * Return: 0, on success.
1434 * non-zero error value on failure
1436 * This is the dmaengine initialization code.
1438 static int axienet_init_dmaengine(struct net_device *ndev)
1440 struct axienet_local *lp = netdev_priv(ndev);
1441 struct skbuf_dma_descriptor *skbuf_dma;
1444 lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1445 if (IS_ERR(lp->tx_chan)) {
1446 dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1447 return PTR_ERR(lp->tx_chan);
1450 lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1451 if (IS_ERR(lp->rx_chan)) {
1452 ret = PTR_ERR(lp->rx_chan);
1453 dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1454 goto err_dma_release_tx;
1457 lp->tx_ring_tail = 0;
1458 lp->tx_ring_head = 0;
1459 lp->rx_ring_tail = 0;
1460 lp->rx_ring_head = 0;
1461 lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1463 if (!lp->tx_skb_ring) {
1465 goto err_dma_release_rx;
1467 for (i = 0; i < TX_BD_NUM_MAX; i++) {
1468 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1471 goto err_free_tx_skb_ring;
1473 lp->tx_skb_ring[i] = skbuf_dma;
1476 lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1478 if (!lp->rx_skb_ring) {
1480 goto err_free_tx_skb_ring;
1482 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++) {
1483 skbuf_dma = kzalloc(sizeof(*skbuf_dma), GFP_KERNEL);
1486 goto err_free_rx_skb_ring;
1488 lp->rx_skb_ring[i] = skbuf_dma;
1490 /* TODO: Instead of BD_NUM_DEFAULT use runtime support */
1491 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1492 axienet_rx_submit_desc(ndev);
1493 dma_async_issue_pending(lp->rx_chan);
1497 err_free_rx_skb_ring:
1498 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1499 kfree(lp->rx_skb_ring[i]);
1500 kfree(lp->rx_skb_ring);
1501 err_free_tx_skb_ring:
1502 for (i = 0; i < TX_BD_NUM_MAX; i++)
1503 kfree(lp->tx_skb_ring[i]);
1504 kfree(lp->tx_skb_ring);
1506 dma_release_channel(lp->rx_chan);
1508 dma_release_channel(lp->tx_chan);
1513 * axienet_init_legacy_dma - init the dma legacy code.
1514 * @ndev: Pointer to net_device structure
1516 * Return: 0, on success.
1517 * non-zero error value on failure
1519 * This is the dma initialization code. It also allocates interrupt
1520 * service routines, enables the interrupt lines and ISR handling.
1523 static int axienet_init_legacy_dma(struct net_device *ndev)
1526 struct axienet_local *lp = netdev_priv(ndev);
1528 /* Enable worker thread for Axi DMA error handling */
1529 lp->stopping = false;
1530 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1532 napi_enable(&lp->napi_rx);
1533 napi_enable(&lp->napi_tx);
1535 /* Enable interrupts for Axi DMA Tx */
1536 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1540 /* Enable interrupts for Axi DMA Rx */
1541 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1545 /* Enable interrupts for Axi Ethernet core (if defined) */
1546 if (lp->eth_irq > 0) {
1547 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1556 free_irq(lp->rx_irq, ndev);
1558 free_irq(lp->tx_irq, ndev);
1560 napi_disable(&lp->napi_tx);
1561 napi_disable(&lp->napi_rx);
1562 cancel_work_sync(&lp->dma_err_task);
1563 dev_err(lp->dev, "request_irq() failed\n");
1568 * axienet_open - Driver open routine.
1569 * @ndev: Pointer to net_device structure
1571 * Return: 0, on success.
1572 * non-zero error value on failure
1574 * This is the driver open routine. It calls phylink_start to start the
1576 * It also allocates interrupt service routines, enables the interrupt lines
1577 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1578 * descriptors are initialized.
1580 static int axienet_open(struct net_device *ndev)
1583 struct axienet_local *lp = netdev_priv(ndev);
1585 /* When we do an Axi Ethernet reset, it resets the complete core
1586 * including the MDIO. MDIO must be disabled before resetting.
1587 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
1589 axienet_lock_mii(lp);
1590 ret = axienet_device_reset(ndev);
1591 axienet_unlock_mii(lp);
1593 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1595 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1599 phylink_start(lp->phylink);
1601 /* Start the statistics refresh work */
1602 schedule_delayed_work(&lp->stats_work, 0);
1604 if (lp->use_dmaengine) {
1605 /* Enable interrupts for Axi Ethernet core (if defined) */
1606 if (lp->eth_irq > 0) {
1607 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1613 ret = axienet_init_dmaengine(ndev);
1615 goto err_free_eth_irq;
1617 ret = axienet_init_legacy_dma(ndev);
1625 if (lp->eth_irq > 0)
1626 free_irq(lp->eth_irq, ndev);
1628 cancel_delayed_work_sync(&lp->stats_work);
1629 phylink_stop(lp->phylink);
1630 phylink_disconnect_phy(lp->phylink);
1635 * axienet_stop - Driver stop routine.
1636 * @ndev: Pointer to net_device structure
1638 * Return: 0, on success.
1640 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
1641 * device. It also removes the interrupt handlers and disables the interrupts.
1642 * The Axi DMA Tx/Rx BDs are released.
1644 static int axienet_stop(struct net_device *ndev)
1646 struct axienet_local *lp = netdev_priv(ndev);
1649 if (!lp->use_dmaengine) {
1650 WRITE_ONCE(lp->stopping, true);
1651 flush_work(&lp->dma_err_task);
1653 napi_disable(&lp->napi_tx);
1654 napi_disable(&lp->napi_rx);
1657 cancel_delayed_work_sync(&lp->stats_work);
1659 phylink_stop(lp->phylink);
1660 phylink_disconnect_phy(lp->phylink);
1662 axienet_setoptions(ndev, lp->options &
1663 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1665 if (!lp->use_dmaengine) {
1666 axienet_dma_stop(lp);
1667 cancel_work_sync(&lp->dma_err_task);
1668 free_irq(lp->tx_irq, ndev);
1669 free_irq(lp->rx_irq, ndev);
1670 axienet_dma_bd_release(ndev);
1672 dmaengine_terminate_sync(lp->tx_chan);
1673 dmaengine_synchronize(lp->tx_chan);
1674 dmaengine_terminate_sync(lp->rx_chan);
1675 dmaengine_synchronize(lp->rx_chan);
1677 for (i = 0; i < TX_BD_NUM_MAX; i++)
1678 kfree(lp->tx_skb_ring[i]);
1679 kfree(lp->tx_skb_ring);
1680 for (i = 0; i < RX_BUF_NUM_DEFAULT; i++)
1681 kfree(lp->rx_skb_ring[i]);
1682 kfree(lp->rx_skb_ring);
1684 dma_release_channel(lp->rx_chan);
1685 dma_release_channel(lp->tx_chan);
1688 axienet_iow(lp, XAE_IE_OFFSET, 0);
1690 if (lp->eth_irq > 0)
1691 free_irq(lp->eth_irq, ndev);
1696 * axienet_change_mtu - Driver change mtu routine.
1697 * @ndev: Pointer to net_device structure
1698 * @new_mtu: New mtu value to be applied
1700 * Return: Always returns 0 (success).
1702 * This is the change mtu driver routine. It checks if the Axi Ethernet
1703 * hardware supports jumbo frames before changing the mtu. This can be
1704 * called only when the device is not up.
1706 static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
1708 struct axienet_local *lp = netdev_priv(ndev);
1710 if (netif_running(ndev))
1713 if ((new_mtu + VLAN_ETH_HLEN +
1714 XAE_TRL_SIZE) > lp->rxmem)
1717 WRITE_ONCE(ndev->mtu, new_mtu);
1722 #ifdef CONFIG_NET_POLL_CONTROLLER
1724 * axienet_poll_controller - Axi Ethernet poll mechanism.
1725 * @ndev: Pointer to net_device structure
1727 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
1728 * to polling the ISRs and are enabled back after the polling is done.
1730 static void axienet_poll_controller(struct net_device *ndev)
1732 struct axienet_local *lp = netdev_priv(ndev);
1734 disable_irq(lp->tx_irq);
1735 disable_irq(lp->rx_irq);
1736 axienet_rx_irq(lp->tx_irq, ndev);
1737 axienet_tx_irq(lp->rx_irq, ndev);
1738 enable_irq(lp->tx_irq);
1739 enable_irq(lp->rx_irq);
1743 static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1745 struct axienet_local *lp = netdev_priv(dev);
1747 if (!netif_running(dev))
1750 return phylink_mii_ioctl(lp->phylink, rq, cmd);
1754 axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1756 struct axienet_local *lp = netdev_priv(dev);
1759 netdev_stats_to_stats64(stats, &dev->stats);
1762 start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1763 stats->rx_packets = u64_stats_read(&lp->rx_packets);
1764 stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1765 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1768 start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1769 stats->tx_packets = u64_stats_read(&lp->tx_packets);
1770 stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1771 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1773 if (!(lp->features & XAE_FEATURE_STATS))
1777 start = read_seqcount_begin(&lp->hw_stats_seqcount);
1778 stats->rx_length_errors =
1779 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1780 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1781 stats->rx_frame_errors =
1782 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1783 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1784 axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1785 stats->rx_length_errors +
1786 stats->rx_crc_errors +
1787 stats->rx_frame_errors;
1788 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1790 stats->tx_aborted_errors =
1791 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1792 stats->tx_fifo_errors =
1793 axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1794 stats->tx_window_errors =
1795 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1796 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1797 stats->tx_aborted_errors +
1798 stats->tx_fifo_errors +
1799 stats->tx_window_errors;
1800 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1803 static const struct net_device_ops axienet_netdev_ops = {
1804 .ndo_open = axienet_open,
1805 .ndo_stop = axienet_stop,
1806 .ndo_start_xmit = axienet_start_xmit,
1807 .ndo_get_stats64 = axienet_get_stats64,
1808 .ndo_change_mtu = axienet_change_mtu,
1809 .ndo_set_mac_address = netdev_set_mac_address,
1810 .ndo_validate_addr = eth_validate_addr,
1811 .ndo_eth_ioctl = axienet_ioctl,
1812 .ndo_set_rx_mode = axienet_set_multicast_list,
1813 #ifdef CONFIG_NET_POLL_CONTROLLER
1814 .ndo_poll_controller = axienet_poll_controller,
1818 static const struct net_device_ops axienet_netdev_dmaengine_ops = {
1819 .ndo_open = axienet_open,
1820 .ndo_stop = axienet_stop,
1821 .ndo_start_xmit = axienet_start_xmit_dmaengine,
1822 .ndo_get_stats64 = axienet_get_stats64,
1823 .ndo_change_mtu = axienet_change_mtu,
1824 .ndo_set_mac_address = netdev_set_mac_address,
1825 .ndo_validate_addr = eth_validate_addr,
1826 .ndo_eth_ioctl = axienet_ioctl,
1827 .ndo_set_rx_mode = axienet_set_multicast_list,
1831 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1832 * @ndev: Pointer to net_device structure
1833 * @ed: Pointer to ethtool_drvinfo structure
1835 * This implements ethtool command for getting the driver information.
1836 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1838 static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
1839 struct ethtool_drvinfo *ed)
1841 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
1842 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
1846 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1848 * @ndev: Pointer to net_device structure
1850 * This implements ethtool command for getting the total register length
1853 * Return: the total regs length
1855 static int axienet_ethtools_get_regs_len(struct net_device *ndev)
1857 return sizeof(u32) * AXIENET_REGS_N;
1861 * axienet_ethtools_get_regs - Dump the contents of all registers present
1862 * in AxiEthernet core.
1863 * @ndev: Pointer to net_device structure
1864 * @regs: Pointer to ethtool_regs structure
1865 * @ret: Void pointer used to return the contents of the registers.
1867 * This implements ethtool command for getting the Axi Ethernet register dump.
1868 * Issue "ethtool -d ethX" to execute this function.
1870 static void axienet_ethtools_get_regs(struct net_device *ndev,
1871 struct ethtool_regs *regs, void *ret)
1873 u32 *data = (u32 *)ret;
1874 size_t len = sizeof(u32) * AXIENET_REGS_N;
1875 struct axienet_local *lp = netdev_priv(ndev);
1880 memset(data, 0, len);
1881 data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1882 data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1883 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1884 data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1885 data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1886 data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1887 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1888 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1889 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1890 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1891 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1892 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1893 data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1894 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1895 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1896 data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1897 data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1898 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1899 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1900 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1901 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1902 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1903 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1904 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1905 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1906 data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1907 data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1908 data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1909 if (!lp->use_dmaengine) {
1910 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1911 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1912 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1913 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1914 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1915 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1916 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1917 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1922 axienet_ethtools_get_ringparam(struct net_device *ndev,
1923 struct ethtool_ringparam *ering,
1924 struct kernel_ethtool_ringparam *kernel_ering,
1925 struct netlink_ext_ack *extack)
1927 struct axienet_local *lp = netdev_priv(ndev);
1929 ering->rx_max_pending = RX_BD_NUM_MAX;
1930 ering->rx_mini_max_pending = 0;
1931 ering->rx_jumbo_max_pending = 0;
1932 ering->tx_max_pending = TX_BD_NUM_MAX;
1933 ering->rx_pending = lp->rx_bd_num;
1934 ering->rx_mini_pending = 0;
1935 ering->rx_jumbo_pending = 0;
1936 ering->tx_pending = lp->tx_bd_num;
1940 axienet_ethtools_set_ringparam(struct net_device *ndev,
1941 struct ethtool_ringparam *ering,
1942 struct kernel_ethtool_ringparam *kernel_ering,
1943 struct netlink_ext_ack *extack)
1945 struct axienet_local *lp = netdev_priv(ndev);
1947 if (ering->rx_pending > RX_BD_NUM_MAX ||
1948 ering->rx_mini_pending ||
1949 ering->rx_jumbo_pending ||
1950 ering->tx_pending < TX_BD_NUM_MIN ||
1951 ering->tx_pending > TX_BD_NUM_MAX)
1954 if (netif_running(ndev))
1957 lp->rx_bd_num = ering->rx_pending;
1958 lp->tx_bd_num = ering->tx_pending;
1963 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
1965 * @ndev: Pointer to net_device structure
1966 * @epauseparm: Pointer to ethtool_pauseparam structure.
1968 * This implements ethtool command for getting axi ethernet pause frame
1969 * setting. Issue "ethtool -a ethX" to execute this function.
1972 axienet_ethtools_get_pauseparam(struct net_device *ndev,
1973 struct ethtool_pauseparam *epauseparm)
1975 struct axienet_local *lp = netdev_priv(ndev);
1977 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
1981 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
1983 * @ndev: Pointer to net_device structure
1984 * @epauseparm:Pointer to ethtool_pauseparam structure
1986 * This implements ethtool command for enabling flow control on Rx and Tx
1987 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
1990 * Return: 0 on success, -EFAULT if device is running
1993 axienet_ethtools_set_pauseparam(struct net_device *ndev,
1994 struct ethtool_pauseparam *epauseparm)
1996 struct axienet_local *lp = netdev_priv(ndev);
1998 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		NL_SET_ERR_MSG(extack,
			       "Please stop netif before applying configuration");
		return -EBUSY;
	}

	if (ecoalesce->rx_max_coalesced_frames > 255 ||
	    ecoalesce->tx_max_coalesced_frames > 255) {
		NL_SET_ERR_MSG(extack, "frames must be less than 256");
		return -EINVAL;
	}

	if (!ecoalesce->rx_max_coalesced_frames ||
	    !ecoalesce->tx_max_coalesced_frames) {
		NL_SET_ERR_MSG(extack, "frames must be non-zero");
		return -EINVAL;
	}

	if ((ecoalesce->rx_max_coalesced_frames > 1 &&
	     !ecoalesce->rx_coalesce_usecs) ||
	    (ecoalesce->tx_max_coalesced_frames > 1 &&
	     !ecoalesce->tx_coalesce_usecs)) {
		NL_SET_ERR_MSG(extack,
			       "usecs must be non-zero when frames is greater than one");
		return -EINVAL;
	}

	lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
	lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
	lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

	return 0;
}
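
/* Note: axienet_ethtools_set_coalesce() only records the new moderation
 * values; for the native AXI DMA datapath they are programmed into the DMA
 * engine when the interface is next brought up.
 */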

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}
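
/* MAC hardware statistics. The counters are read under lp->hw_stats_seqcount
 * so that the values exported here are a consistent snapshot with respect to
 * the background refresh performed by lp->stats_work (axienet_refresh_stats).
 */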
static void axienet_ethtools_get_ethtool_stats(struct net_device *dev,
					       struct ethtool_stats *stats,
					       u64 *data)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		data[0] = axienet_stat(lp, STAT_RX_BYTES);
		data[1] = axienet_stat(lp, STAT_TX_BYTES);
		data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
		data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
		data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
		data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
		data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
		data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
		data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static const char axienet_ethtool_stats_strings[][ETH_GSTRING_LEN] = {
	"Received bytes",
	"Transmitted bytes",
	"RX Good VLAN Tagged Frames",
	"TX Good VLAN Tagged Frames",
	"TX Good PFC Frames",
	"RX Good PFC Frames",
	"User Defined Counter 0",
	"User Defined Counter 1",
	"User Defined Counter 2",
};

static void axienet_ethtools_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	switch (stringset) {
	case ETH_SS_STATS:
		memcpy(data, axienet_ethtool_stats_strings,
		       sizeof(axienet_ethtool_stats_strings));
		break;
	}
}

static int axienet_ethtools_get_sset_count(struct net_device *dev, int sset)
{
	struct axienet_local *lp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		if (lp->features & XAE_FEATURE_STATS)
			return ARRAY_SIZE(axienet_ethtool_stats_strings);
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}
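
/* The standard ethtool MAC/pause/RMON statistics below are only available
 * when the optional statistics counters are present in the core, which is
 * detected at probe time via XAE_ABILITY_STATS and recorded in
 * XAE_FEATURE_STATS; otherwise the callbacks return without reporting.
 */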
static void
axienet_ethtools_get_pause_stats(struct net_device *dev,
				 struct ethtool_pause_stats *pause_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		pause_stats->tx_pause_frames =
			axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
		pause_stats->rx_pause_frames =
			axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static void
axienet_ethtool_get_eth_mac_stats(struct net_device *dev,
				  struct ethtool_eth_mac_stats *mac_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		mac_stats->FramesTransmittedOK =
			axienet_stat(lp, STAT_TX_GOOD_FRAMES);
		mac_stats->SingleCollisionFrames =
			axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
		mac_stats->MultipleCollisionFrames =
			axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
		mac_stats->FramesReceivedOK =
			axienet_stat(lp, STAT_RX_GOOD_FRAMES);
		mac_stats->FrameCheckSequenceErrors =
			axienet_stat(lp, STAT_RX_FCS_ERRORS);
		mac_stats->AlignmentErrors =
			axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
		mac_stats->FramesWithDeferredXmissions =
			axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
		mac_stats->LateCollisions =
			axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
		mac_stats->FramesAbortedDueToXSColls =
			axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
		mac_stats->MulticastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
		mac_stats->BroadcastFramesXmittedOK =
			axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
		mac_stats->FramesWithExcessiveDeferral =
			axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
		mac_stats->MulticastFramesReceivedOK =
			axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
		mac_stats->BroadcastFramesReceivedOK =
			axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
		mac_stats->InRangeLengthErrors =
			axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}

static void
axienet_ethtool_get_eth_ctrl_stats(struct net_device *dev,
				   struct ethtool_eth_ctrl_stats *ctrl_stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		ctrl_stats->MACControlFramesTransmitted =
			axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
		ctrl_stats->MACControlFramesReceived =
			axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
		ctrl_stats->UnsupportedOpcodesReceived =
			axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
}
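
/* RMON frame-size buckets. The exact upper bound of the 1024+ bucket depends
 * on the core's configured maximum frame size; the ranges below are a
 * reasonable mapping onto the frame-size counters read in
 * axienet_ethtool_get_rmon_stats() and should be kept in sync with them.
 */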
static const struct ethtool_rmon_hist_range axienet_rmon_ranges[] = {
	{   64,    64 },
	{   65,   127 },
	{  128,   255 },
	{  256,   511 },
	{  512,  1023 },
	{ 1024,  1518 },
	{ 1519, 16384 },
	{ },
};

static void
axienet_ethtool_get_rmon_stats(struct net_device *dev,
			       struct ethtool_rmon_stats *rmon_stats,
			       const struct ethtool_rmon_hist_range **ranges)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	if (!(lp->features & XAE_FEATURE_STATS))
		return;

	do {
		start = read_seqcount_begin(&lp->hw_stats_seqcount);
		rmon_stats->undersize_pkts =
			axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
		rmon_stats->oversize_pkts =
			axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
		rmon_stats->fragments =
			axienet_stat(lp, STAT_FRAGMENT_FRAMES);

		rmon_stats->hist[0] =
			axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
		rmon_stats->hist[1] =
			axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
		rmon_stats->hist[2] =
			axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
		rmon_stats->hist[3] =
			axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
		rmon_stats->hist[4] =
			axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
		rmon_stats->hist[5] =
			axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
		rmon_stats->hist[6] =
			rmon_stats->oversize_pkts;

		rmon_stats->hist_tx[0] =
			axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
		rmon_stats->hist_tx[1] =
			axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
		rmon_stats->hist_tx[2] =
			axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
		rmon_stats->hist_tx[3] =
			axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
		rmon_stats->hist_tx[4] =
			axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
		rmon_stats->hist_tx[5] =
			axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
		rmon_stats->hist_tx[6] =
			axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
	} while (read_seqcount_retry(&lp->hw_stats_seqcount, start));

	*ranges = axienet_rmon_ranges;
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset = axienet_ethtools_nway_reset,
	.get_ethtool_stats = axienet_ethtools_get_ethtool_stats,
	.get_strings = axienet_ethtools_get_strings,
	.get_sset_count = axienet_ethtools_get_sset_count,
	.get_pause_stats = axienet_ethtools_get_pause_stats,
	.get_eth_mac_stats = axienet_ethtool_get_eth_mac_stats,
	.get_eth_ctrl_stats = axienet_ethtool_get_eth_ctrl_stats,
	.get_rmon_stats = axienet_ethtool_get_rmon_stats,
};
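
/* phylink PCS callbacks. The internal SGMII/1000BASE-X PCS/PMA core is
 * managed through a clause 22 MDIO device (lp->pcs_phy), so the generic
 * phylink_mii_c22_pcs_* helpers do the actual register work.
 */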
static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
				  unsigned int neg_mode,
				  struct phylink_link_state *state)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_get_state(pcs_phy, neg_mode, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_an_restart(pcs_phy);
}

static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	if (lp->switch_x_sgmii) {
		ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
				    interface == PHY_INTERFACE_MODE_SGMII ?
					XLNX_MII_STD_SELECT_SGMII : 0);
		if (ret < 0) {
			netdev_warn(ndev,
				    "Failed to switch PHY interface: %d\n",
				    ret);
			return ret;
		}
	}

	ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
					 neg_mode);
	if (ret < 0)
		netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);

	return ret;
}

static const struct phylink_pcs_ops axienet_pcs_ops = {
	.pcs_get_state = axienet_pcs_get_state,
	.pcs_config = axienet_pcs_config,
	.pcs_an_restart = axienet_pcs_an_restart,
};

static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
	    interface == PHY_INTERFACE_MODE_SGMII)
		return &lp->pcs;

	return NULL;
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}
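
/* Program the MAC for the negotiated link: set the EMMC speed field and
 * enable or disable Tx/Rx flow control in the FCC register according to the
 * pause resolution handed down by phylink.
 */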
static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1Gbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.mac_select_pcs = axienet_mac_select_pcs,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work: pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 i;
	u32 axienet_status;
	struct axidma_bd *cur_p;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;

	/* Don't bother if we are going to stop anyway */
	if (READ_ONCE(lp->stopping))
		return;

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(lp->dev, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	axienet_dma_start(lp);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev: Pointer to platform device structure.
 *
 * Return: 0, on success
 *	    Non-zero error value on failure.
 *
 * This is the probe routine for Axi Ethernet driver. This is called before
 * any other driver routines are invoked. It allocates and sets up the Ethernet
 * device. Parses through device tree and populates fields of
 * axienet_local. It registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	struct resource *ethres;
	u8 mac_addr[ETH_ALEN];
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->features = NETIF_F_SG;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	u64_stats_init(&lp->rx_stat_sync);
	u64_stats_init(&lp->tx_stat_sync);

	mutex_init(&lp->stats_lock);
	seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
	INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);

	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		/* For backward compatibility, if named AXI clock is not present,
		 * treat the first clock specified as the AXI clock.
		 */
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	/* Map device registers */
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;

	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
		lp->features |= XAE_FEATURE_STATS;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum any contiguous range */
			ndev->features |= NETIF_F_HW_CSUM;
			break;
		case 2:
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			ndev->features |= NETIF_F_RXCSUM;
			break;
		case 2:
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			ndev->features |= NETIF_F_RXCSUM;
			break;
		}
	}
	/* For supporting jumbo frames, the Axi Ethernet hardware must have
	 * a larger Rx/Tx Memory. Typically, the size must be large so that
	 * we can enable jumbo option and start supporting jumbo frames.
	 * Here we check for memory allocated for Rx/Tx in the hardware from
	 * the device-tree and accordingly set flags.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
						   "xlnx,switch-x-sgmii");

	/* Start with the proprietary, and broken phy_type */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}
	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}
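
	/* Two datapaths are supported: when no "dmas" property is present the
	 * driver programs the AXI DMA block directly (registers and IRQs taken
	 * from "axistream-connected" or from the Ethernet node itself);
	 * otherwise the transfers go through the dmaengine framework.
	 */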
	if (!of_property_present(pdev->dev.of_node, "dmas")) {
		/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
		np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);

		if (np) {
			struct resource dmares;

			ret = of_address_to_resource(np, 0, &dmares);
			if (ret) {
				dev_err(&pdev->dev,
					"unable to get DMA resource\n");
				of_node_put(np);
				goto cleanup_clk;
			}
			lp->dma_regs = devm_ioremap_resource(&pdev->dev,
							     &dmares);
			lp->rx_irq = irq_of_parse_and_map(np, 1);
			lp->tx_irq = irq_of_parse_and_map(np, 0);
			of_node_put(np);
			lp->eth_irq = platform_get_irq_optional(pdev, 0);
		} else {
			/* Check for these resources directly on the Ethernet node. */
			lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
			lp->rx_irq = platform_get_irq(pdev, 1);
			lp->tx_irq = platform_get_irq(pdev, 0);
			lp->eth_irq = platform_get_irq_optional(pdev, 2);
		}
		if (IS_ERR(lp->dma_regs)) {
			dev_err(&pdev->dev, "could not map DMA regs\n");
			ret = PTR_ERR(lp->dma_regs);
			goto cleanup_clk;
		}
		if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
			dev_err(&pdev->dev, "could not determine irqs\n");
			ret = -ENOMEM;
			goto cleanup_clk;
		}

		/* Reset core now that clocks are enabled, prior to accessing MDIO */
		ret = __axienet_device_reset(lp);
		if (ret)
			goto cleanup_clk;

		/* Autodetect the need for 64-bit DMA pointers.
		 * When the IP is configured for a bus width bigger than 32 bits,
		 * writing the MSB registers is mandatory, even if they are all 0.
		 * We can detect this case by writing all 1's to one such register
		 * and see if that sticks: when the IP is configured for 32 bits
		 * only, those registers are RES0.
		 * Those MSB registers were introduced in IP v7.1, which we check first.
		 */
		if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
			void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

			iowrite32(0x0, desc);
			if (ioread32(desc) == 0) {	/* sanity check */
				iowrite32(0xffffffff, desc);
				if (ioread32(desc) > 0) {
					lp->features |= XAE_FEATURE_DMA_64BIT;
					addr_width = 64;
					dev_info(&pdev->dev,
						 "autodetected 64-bit DMA range\n");
				}
				iowrite32(0x0, desc);
			}
		}
		if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
			dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
			ret = -EINVAL;
			goto cleanup_clk;
		}

		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto cleanup_clk;
		}
		netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
		netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
	} else {
		struct xilinx_vdma_config cfg;
		struct dma_chan *tx_chan;

		lp->eth_irq = platform_get_irq_optional(pdev, 0);
		if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
			ret = lp->eth_irq;
			goto cleanup_clk;
		}
		tx_chan = dma_request_chan(lp->dev, "tx_chan0");
		if (IS_ERR(tx_chan)) {
			ret = PTR_ERR(tx_chan);
			dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
			goto cleanup_clk;
		}

		cfg.reset = 1;
		/* As name says VDMA but it has support for DMA channel reset */
		ret = xilinx_vdma_channel_set_config(tx_chan, &cfg);
		if (ret < 0) {
			dev_err(&pdev->dev, "Reset channel failed\n");
			dma_release_channel(tx_chan);
			goto cleanup_clk;
		}

		dma_release_channel(tx_chan);
		lp->use_dmaengine = 1;
	}

	if (lp->use_dmaengine)
		ndev->netdev_ops = &axienet_netdev_dmaengine_ops;
	else
		ndev->netdev_ops = &axienet_netdev_ops;
	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
	if (!ret) {
		axienet_set_mac_address(ndev, mac_addr);
	} else {
		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
			 ret);
		axienet_set_mac_address(ndev, NULL);
	}

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;

	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
		if (!np) {
			/* Deprecated: Always use "pcs-handle" for pcs_phy.
			 * Falling back to "phy-handle" here is only for
			 * backward compatibility with old device trees.
			 */
			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		}
		if (!np) {
			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(np);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			of_node_put(np);
			goto cleanup_mdio;
		}
		of_node_put(np);
		lp->pcs.ops = &axienet_pcs_ops;
		lp->pcs.neg_mode = true;
		lp->pcs.poll = true;
	}
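
	/* Register with phylink. Only the interface selected above is marked
	 * as supported unless the core has the SGMII/1000BASE-X switching
	 * option, in which case both of those modes are allowed.
	 */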
	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
					      MAC_10FD | MAC_100FD | MAC_1000FD;

	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
	if (lp->switch_x_sgmii) {
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  lp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  lp->phylink_config.supported_interfaces);
	}

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}
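
/* Undo what axienet_probe() set up: unregister the network device and
 * release the phylink, PCS, MDIO and clock resources.
 */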
static void axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

	free_netdev(ndev);
}

static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}
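
/* Power management hooks: suspend detaches the interface and takes it down
 * through the normal stop path if it was running; resume re-opens and
 * re-attaches it. No extra hardware state is saved here.
 */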
static int axienet_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	rtnl_lock();
	axienet_stop(ndev);
	rtnl_unlock();

	return 0;
}

static int axienet_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	rtnl_lock();
	axienet_open(ndev);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
				axienet_suspend, axienet_resume);

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		.name = "xilinx_axienet",
		.pm = &axienet_pm_ops,
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");