// SPDX-License-Identifier: GPL-2.0+
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 *
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/page_pool/helpers.h>
#include <net/selftests.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#include <asm/cacheflush.h>
static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);
static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
				int cpu, struct xdp_buff *xdp,
				u32 dma_sync_len);

#define DRIVER_NAME	"fec"
static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
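/* Note on the mapping above: VLAN PCP values 0-1 land on queue 0, PCP 2-4
 * on queue 1, and PCP 5-7 on queue 2. On multi-queue FECs, queues 1/2 are
 * presumably the AVB class B/A queues - an inference from the quirk names
 * used below, not a statement from this file.
 */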
#define FEC_ENET_RSEM_V		0x84
#define FEC_ENET_RSFL_V		16
#define FEC_ENET_RAEM_V		0x8
#define FEC_ENET_RAFL_V		0x8
#define FEC_ENET_OPD_V		0xFFF0
#define FEC_MDIO_PM_TIMEOUT	100 /* ms */
#define FEC_ENET_XDP_PASS	0
#define FEC_ENET_XDP_CONSUMED	BIT(0)
#define FEC_ENET_XDP_TX		BIT(1)
#define FEC_ENET_XDP_REDIR	BIT(2)
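/* These verdicts are bit flags rather than a plain enum so the rx loop can
 * OR per-packet results into a single summary word; if FEC_ENET_XDP_REDIR
 * ends up set, one final xdp_do_flush() is issued after the whole budget
 * has been processed (see fec_enet_rx_queue below).
 */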
static const struct fec_devinfo fec_imx25_info = {
	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx27_info = {
	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx28_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6q_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_mvf600_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6x_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx6ul_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8mq_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx8qm_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
		  FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
		  FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_s32v234_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
		  FEC_QUIRK_HAS_MDIO_C45,
};
static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);
static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_imx25_info, },
	{ .compatible = "fsl,imx27-fec", .data = &fec_imx27_info, },
	{ .compatible = "fsl,imx28-fec", .data = &fec_imx28_info, },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_imx6q_info, },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_mvf600_info, },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_imx6x_info, },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_imx6ul_info, },
	{ .compatible = "fsl,imx8mq-fec", .data = &fec_imx8mq_info, },
	{ .compatible = "fsl,imx8qm-fec", .data = &fec_imx8qm_info, },
	{ .compatible = "fsl,s32v234-fec", .data = &fec_s32v234_info, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE		64
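/* Worked example: round_down(2048 - 64, 64) = 1984, so PKT_MAXBUF_SIZE is
 * 1984 bytes; the value is already 64-byte aligned, and the 64 bytes of
 * slack keep a worst-case-aligned frame inside a 2K buffer.
 */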
/* FEC receive acceleration */
#define FEC_RACC_IPDIS		BIT(1)
#define FEC_RACC_PRODIS		BIT(2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)
/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    defined(CONFIG_ARM64)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif
/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_ST_C45		(0)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_READ_C45	(3 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_OP_ADDR_WRITE	(0)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
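/* Illustrative sketch (not driver code): a C22 read frame for PHY address 1,
 * register MII_BMSR (0x01), would be composed with the helpers above as
 *
 *	u32 frame = FEC_MMFR_ST | FEC_MMFR_OP_READ |
 *		    FEC_MMFR_PA(1) | FEC_MMFR_RA(MII_BMSR) | FEC_MMFR_TA;
 *
 * i.e. ST = 01, OP = 10 (read), PA = 00001, RA = 00001, TA = 10, with the
 * 16 data bits left zero; the result is later pulled back out of the same
 * register with FEC_MMFR_DATA().
 */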
/* FEC ECR bits definition */
#define FEC_ECR_RESET	BIT(0)
#define FEC_ECR_ETHEREN	BIT(1)
#define FEC_ECR_MAGICEN	BIT(2)
#define FEC_ECR_SLEEP	BIT(3)
#define FEC_ECR_EN1588	BIT(4)
#define FEC_ECR_BYTESWP	BIT(8)
/* FEC RCR bits definition */
#define FEC_RCR_LOOP	BIT(0)
#define FEC_RCR_HALFDPX	BIT(1)
#define FEC_RCR_MII	BIT(2)
#define FEC_RCR_PROMISC	BIT(3)
#define FEC_RCR_BC_REJ	BIT(4)
#define FEC_RCR_FLOWCTL	BIT(5)
#define FEC_RCR_RMII	BIT(8)
#define FEC_RCR_10BASET	BIT(9)

#define FEC_TXWMRK_STRFWD	BIT(8)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT	(2 * HZ)
#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
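/* Sizing rationale (as read from the macro, not a hardware statement): each
 * TSO segment typically consumes up to two descriptors - one for the
 * rebuilt header and one for its payload slice - plus room for the skb's
 * page fragments, hence FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS.
 */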
#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
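/* Worked example with hypothetical values: for tso_hdrs_dma = 0x80000000,
 * ring_size = 512 and TSO_HEADER_SIZE = 128 (both assumed here purely for
 * illustration), IS_TSO_HEADER() is true for any DMA address in
 * [0x80000000, 0x80010000) - such buffers live in the preallocated TSO
 * header area and must not be dma_unmap()ed individually.
 */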
static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}
static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}
static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}
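/* Worked example: with ring_size = 512, dirty_tx at index 10 and bd.cur at
 * index 500, the raw difference is 10 - 500 - 1 = -491 entries, and adding
 * ring_size yields 21 free descriptors. The extra -1 keeps one slot in
 * reserve so a completely full ring can never be confused with an empty one.
 */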
static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}
static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr     SC     addr       len  SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_buf[index].buf_p);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}
/*
 * Coldfire does not support DMA coherent allocations, and has historically
 * used a band-aid with a manual flush in fec_enet_rx_queue.
 */
#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp)
{
	return dma_alloc_noncoherent(dev, size, handle, DMA_BIDIRECTIONAL, gfp);
}

static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle)
{
	dma_free_noncoherent(dev, size, cpu_addr, handle, DMA_BIDIRECTIONAL);
}
#else /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
static void *fec_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp)
{
	return dma_alloc_coherent(dev, size, handle, gfp);
}

static void fec_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle)
{
	dma_free_coherent(dev, size, cpu_addr, handle);
}
#endif /* !CONFIG_COLDFIRE || CONFIG_COLDFIRE_COHERENT_DMA */
struct fec_dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
};

static void fec_dmam_release(struct device *dev, void *res)
{
	struct fec_dma_devres *this = res;

	fec_dma_free(dev, this->size, this->vaddr, this->dma_handle);
}

static void *fec_dmam_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			    gfp_t gfp)
{
	struct fec_dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(fec_dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;
	vaddr = fec_dma_alloc(dev, size, handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}
	dr->vaddr = vaddr;
	dr->dma_handle = *handle;
	dr->size = size;
	devres_add(dev, dr);
	return vaddr;
}
static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}
static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}
static int
fec_enet_create_page_pool(struct fec_enet_private *fep,
			  struct fec_enet_priv_rx_q *rxq, int size)
{
	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = size,
		.nid = dev_to_node(&fep->pdev->dev),
		.dev = &fep->pdev->dev,
		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
		.offset = FEC_ENET_XDP_HEADROOM,
		.max_len = FEC_ENET_RX_FRSIZE,
	};
	int err;

	rxq->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rxq->page_pool)) {
		err = PTR_ERR(rxq->page_pool);
		rxq->page_pool = NULL;
		return err;
	}

	err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
	if (err < 0)
		goto err_free_pp;

	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_unregister_rxq;

	return 0;

err_unregister_rxq:
	xdp_rxq_info_unreg(&rxq->xdp_rxq);
err_free_pp:
	page_pool_destroy(rxq->page_pool);
	rxq->page_pool = NULL;

	return err;
}
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = skb_frag_address(this_frag);

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;

dma_mapping_error:
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}
static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_buf[index].buf_p = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp is performed before txq->bd.cur. */
	wmb();
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}
static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}
static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_tcp_all_headers(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_OK;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}
static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len, total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	hdr_len = tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer */
	txq->tx_buf[index].buf_p = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;

	/* Trigger transmission start */
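	/* ERR007885 workaround (inferred from the quirk name): a write to
	 * the TDAR doorbell can apparently be lost if it races with the
	 * controller clearing the bit, so on affected cores TDAR is sampled
	 * up to four times and only rewritten once it reads back zero.
	 */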
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}
static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}
/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
				if (bdp->cbd_bufaddr &&
				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
				if (txq->tx_buf[i].buf_p)
					dev_kfree_skb_any(txq->tx_buf[i].buf_p);
			} else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);

				if (txq->tx_buf[i].buf_p)
					xdp_return_frame(txq->tx_buf[i].buf_p);
			} else {
				struct page *page = txq->tx_buf[i].buf_p;

				if (page)
					page_pool_put_page(page->pp, page, 0, false);
			}

			txq->tx_buf[i].buf_p = NULL;
			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
			txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}
static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}
static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}
/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = FEC_ECR_ETHEREN;

	if (fep->bufdesc_ex)
		fec_ptp_save_state(fep);

	/* Whack a reset. We should wait for this.
	 * For the i.MX6SX SoC, the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
	    ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * An enet-mac reset also resets the MAC address registers,
	 * so we need to reconfigure them.
	 */
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt, except MDIO. */
	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		u32 val = readl(fep->hwp + FEC_RACC);

		/* align IP header */
		val |= FEC_RACC_SHIFT16;
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			/* set RX checksum */
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= FEC_RCR_RMII;
		else
			rcntl &= ~FEC_RCR_RMII;

		/* 1G, 100M or 10M */
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (ndev->phydev->speed == SPEED_100)
				rcntl &= ~FEC_RCR_10BASET;
			else
				rcntl |= FEC_RCR_10BASET;
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame */
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     ndev->phydev && ndev->phydev->pause)) {
		rcntl |= FEC_RCR_FLOWCTL;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_RCR_FLOWCTL;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= FEC_ECR_BYTESWP;
		/* enable ENET store and forward mode */
		writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= FEC_ECR_EN1588;

	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
	    fep->rgmii_txc_dly)
		ecntl |= FEC_ENET_TXC_DLY;
	if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
	    fep->rgmii_rxc_dly)
		ecntl |= FEC_ENET_RXC_DLY;

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex) {
		fec_ptp_start_cyclecounter(ndev);
		fec_ptp_restore_state(fep);
	}

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(0, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
		fec_enet_itr_coal_set(ndev);
}
static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
{
	if (!(of_machine_is_compatible("fsl,imx8qm") ||
	      of_machine_is_compatible("fsl,imx8qxp") ||
	      of_machine_is_compatible("fsl,imx8dxl")))
		return 0;

	return imx_scu_get_handle(&fep->ipc_handle);
}
static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
{
	struct device_node *np = fep->pdev->dev.of_node;
	u32 rsrc_id, val;
	int idx;

	if (!np || !fep->ipc_handle)
		return;

	idx = of_alias_get_id(np, "ethernet");
	if (idx < 0)
		idx = 0;
	rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;

	val = enabled ? 1 : 0;
	imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
}
static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
{
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;

	if (stop_gpr->gpr) {
		if (enabled)
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit),
					   BIT(stop_gpr->bit));
		else
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit), 0);
	} else if (pdata && pdata->sleep_mode_enable) {
		pdata->sleep_mode_enable(enabled);
	} else {
		fec_enet_ipg_stop_set(fep, enabled);
	}
}
static void fec_irqs_disable(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	writel(0, fep->hwp + FEC_IMASK);
}

static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	writel(0, fep->hwp + FEC_IMASK);
	writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
}
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	if (fep->bufdesc_ex)
		fec_ptp_save_state(fep);

	/* Whack a reset. We should wait for this.
	 * For the i.MX6SX SoC, the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
	} else {
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
	writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}

	if (fep->bufdesc_ex) {
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= FEC_ECR_EN1588;
		writel(val, fep->hwp + FEC_ECNTRL);

		fec_ptp_start_cyclecounter(ndev);
		fec_ptp_restore_state(fep);
	}
}
static void
fec_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_tx_wake_all_queues(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}
static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
		  struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}
static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
{
	struct fec_enet_private *fep;
	struct xdp_frame *xdpf;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;
	struct page *page;
	int frame_len;

	fep = netdev_priv(ndev);

	txq = fep->tx_queue[queue_id];
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			skb = txq->tx_buf[index].buf_p;
			if (bdp->cbd_bufaddr &&
			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (!skb)
				goto tx_buf_done;
		} else {
			/* Tx processing cannot call any XDP (or page pool)
			 * APIs if the "budget" is 0, because NAPI being
			 * called with a budget of 0 (such as from netpoll)
			 * indicates we may be in IRQ context, and the page
			 * pool cannot be used from IRQ context.
			 */
			if (unlikely(!budget))
				break;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
				xdpf = txq->tx_buf[index].buf_p;
				if (bdp->cbd_bufaddr)
					dma_unmap_single(&fep->pdev->dev,
							 fec32_to_cpu(bdp->cbd_bufaddr),
							 fec16_to_cpu(bdp->cbd_datlen),
							 DMA_TO_DEVICE);
			} else {
				page = txq->tx_buf[index].buf_p;
			}

			bdp->cbd_bufaddr = cpu_to_fec32(0);
			if (unlikely(!txq->tx_buf[index].buf_p)) {
				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
				goto tx_buf_done;
			}

			frame_len = fec16_to_cpu(bdp->cbd_datlen);
		}

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			      BD_ENET_TX_RL | BD_ENET_TX_UN |
			      BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;

			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
				ndev->stats.tx_bytes += skb->len;
			else
				ndev->stats.tx_bytes += frame_len;
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
			 * are to time stamp the packet, so we still need to check time
			 * stamping enabled flag.
			 */
			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
				     fep->hwts_tx_en) && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps shhwtstamps;
				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
				skb_tstamp_tx(skb, &shhwtstamps);
			}

			/* Free the sk buffer associated with this last transmit */
			napi_consume_skb(skb, budget);
		} else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
			xdp_return_frame_rx_napi(xdpf);
		} else { /* recycle pages of XDP_TX frames */
			/* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
			page_pool_put_page(page->pp, page, 0, true);
		}

tx_buf_done:
		txq->tx_buf[index].buf_p = NULL;
		/* restore default tx buffer type: FEC_TXBUF_T_SKB */
		txq->tx_buf[index].type = FEC_TXBUF_T_SKB;

		/* Make sure the updates to bdp and tx_buf are performed
		 * before dirty_tx.
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full */
		if (netif_tx_queue_stopped(nq)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
}
static void fec_enet_tx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_tx_queues - 1; i >= 0; i--)
		fec_enet_tx_queue(ndev, i, budget);
}
static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
				struct bufdesc *bdp, int index)
{
	struct page *new_page;
	dma_addr_t phys_addr;

	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
	WARN_ON(!new_page);
	rxq->rx_skb_info[index].page = new_page;

	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
}
static u32
fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
{
	unsigned int sync, len = xdp->data_end - xdp->data;
	u32 ret = FEC_ENET_XDP_PASS;
	struct page *page;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail and xdp_adjust_head, the DMA sync
	 * for_device must cover the maximum length the CPU touched.
	 */
	sync = xdp->data_end - xdp->data;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		rxq->stats[RX_XDP_PASS]++;
		ret = FEC_ENET_XDP_PASS;
		break;

	case XDP_REDIRECT:
		rxq->stats[RX_XDP_REDIRECT]++;
		err = xdp_do_redirect(fep->netdev, xdp, prog);
		if (unlikely(err))
			goto xdp_err;

		ret = FEC_ENET_XDP_REDIR;
		break;

	case XDP_TX:
		rxq->stats[RX_XDP_TX]++;
		err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
		if (unlikely(err)) {
			rxq->stats[RX_XDP_TX_ERRORS]++;
			goto xdp_err;
		}

		ret = FEC_ENET_XDP_TX;
		break;

	default:
		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
		fallthrough;

	case XDP_ABORTED:
		fallthrough;	/* handle aborts by dropping packet */

	case XDP_DROP:
		rxq->stats[RX_XDP_DROP]++;
xdp_err:
		ret = FEC_ENET_XDP_CONSUMED;
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(rxq->page_pool, page, sync, true);
		if (act != XDP_DROP)
			trace_xdp_exception(fep->netdev, prog, act);
		break;
	}

	return ret;
}
/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
	u32 data_start = FEC_ENET_XDP_HEADROOM;
	int cpu = smp_processor_id();
	struct xdp_buff xdp;
	struct page *page;
	u32 sub_len = 4;

#if !defined(CONFIG_M5272)
	/* If it has the FEC_QUIRK_HAS_RACC quirk property, the bit of
	 * FEC_RACC_SHIFT16 is set by default in the probe function.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		data_start += 2;
		sub_len += 2;
	}
#endif

#if defined(CONFIG_COLDFIRE) && !defined(CONFIG_COLDFIRE_COHERENT_DMA)
	/*
	 * Hacky flush of all caches instead of using the DMA API for the TSO
	 * headers.
	 */
	flush_cache_all();
#endif
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->bd.cur;
	xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);

	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);

		/* Check for errors. */
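		/* Note: the XOR below inverts the LAST bit in "status", so
		 * the error mask that follows flags frames that did NOT
		 * carry BD_ENET_RX_LAST (e.g. oversized frames split across
		 * descriptors) as length errors.
		 */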
		status ^= BD_ENET_RX_LAST;
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			      BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			      BD_ENET_RX_CL)) {
			ndev->stats.rx_errors++;
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
				      | BD_ENET_RX_LAST)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(bdp, &rxq->bd);
		page = rxq->rx_skb_info[index].page;
		dma_sync_single_for_cpu(&fep->pdev->dev,
					fec32_to_cpu(bdp->cbd_bufaddr),
					pkt_len,
					DMA_FROM_DEVICE);
		prefetch(page_address(page));
		fec_enet_update_cbd(rxq, bdp, index);

		if (xdp_prog) {
			xdp_buff_clear_frags_flag(&xdp);
			/* subtract 16bit shift and FCS */
			xdp_prepare_buff(&xdp, page_address(page),
					 data_start, pkt_len - sub_len, false);
			ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
			xdp_result |= ret;
			if (ret != FEC_ENET_XDP_PASS)
				goto rx_processing_done;
		}

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = build_skb(page_address(page), PAGE_SIZE);
		if (unlikely(!skb)) {
			page_pool_recycle_direct(rxq->page_pool, page);
			ndev->stats.rx_dropped++;

			netdev_err_once(ndev, "build_skb failed!\n");
			goto rx_processing_done;
		}

		skb_reserve(skb, data_start);
		skb_put(skb, pkt_len - sub_len);
		skb_mark_for_recycle(skb);

		if (unlikely(need_swap)) {
			data = page_address(page) + FEC_ENET_XDP_HEADROOM;
			swap_buffer(data, pkt_len);
		}
		data = skb->data;

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
				(struct vlan_hdr *)(data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		skb_record_rx_queue(skb, queue_id);
		napi_gro_receive(&fep->napi, skb);

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, rxq->bd.reg_desc_active);
	}
	rxq->bd.cur = bdp;

	if (xdp_result & FEC_ENET_XDP_REDIR)
		xdp_do_flush();

	return pkt_received;
}
static int fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i, done = 0;

	/* Make sure that AVB queues are processed first. */
	for (i = fep->num_rx_queues - 1; i >= 0; i--)
		done += fec_enet_rx_queue(ndev, budget - done, i);

	return done;
}
static bool fec_enet_collect_events(struct fec_enet_private *fep)
{
	uint int_events;

	int_events = readl(fep->hwp + FEC_IEVENT);

	/* Don't clear MDIO events, we poll for those */
	int_events &= ~FEC_ENET_MII;

	writel(int_events, fep->hwp + FEC_IEVENT);

	return int_events != 0;
}
static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	irqreturn_t ret = IRQ_NONE;

	if (fec_enet_collect_events(fep) && fep->link) {
		ret = IRQ_HANDLED;

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable interrupts */
			writel(0, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}

	return ret;
}
static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int done = 0;

	do {
		done += fec_enet_rx(ndev, budget - done);
		fec_enet_tx(ndev, budget);
	} while ((done < budget) && fec_enet_collect_events(fep));

	if (done < budget) {
		napi_complete_done(napi, done);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}

	return done;
}
/* ------------------------------------------------------------------------- */
static int fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	unsigned char *iap, tmpaddr[ETH_ALEN];
	int ret;

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;

		if (np) {
			ret = of_get_mac_address(np, tmpaddr);
			if (!ret)
				iap = tmpaddr;
			else if (ret == -EPROBE_DEFER)
				return ret;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);

		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
			 ndev->dev_addr);
		return 0;
	}

	/* Adjust MAC if using macaddr */
	eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);

	return 0;
}
/* ------------------------------------------------------------------------- */

/* The LPI Sleep Ts count is based on the tx clock (clk_ref):
 * LPI sleep cnt value = X us / cycle_ns.
 */
static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	return us * (fep->clk_ref_rate / 1000) / 1000;
}
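/* Worked example (50 MHz is a hypothetical clk_ref_rate): one cycle is
 * 20 ns, so a 250 us LPI sleep timer converts to
 * 250 * (50000000 / 1000) / 1000 = 12500 cycles.
 */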
static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct ethtool_keee *p = &fep->eee;
	unsigned int sleep_cycle, wake_cycle;

	if (enable) {
		sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
		wake_cycle = sleep_cycle;
	} else {
		sleep_cycle = 0;
		wake_cycle = 0;
	}

	writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
	writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);

	return 0;
}
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int status_change = 0;

	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
		if (!fep->link) {
			fep->link = phy_dev->link;
			status_change = 1;
		}

		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
			status_change = 1;
		}

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
		if (status_change) {
			netif_stop_queue(ndev);
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_restart(ndev);
			netif_tx_wake_all_queues(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
		}
		if (fep->quirks & FEC_QUIRK_HAS_EEE)
			fec_enet_eee_mode_set(ndev, phy_dev->enable_tx_lpi);
	} else {
		if (fep->link) {
			netif_stop_queue(ndev);
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_stop(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	if (status_change)
		phy_print_status(phy_dev);
}
static int fec_enet_mdio_wait(struct fec_enet_private *fep)
{
	uint ievent;
	int ret;

	ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
					ievent & FEC_ENET_MII, 2, 30000);

	if (!ret)
		writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	return ret;
}
static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	int ret = 0, frame_start, frame_addr, frame_op;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* C22 read */
	frame_op = FEC_MMFR_OP_READ;
	frame_start = FEC_MMFR_ST;
	frame_addr = regnum;

	/* start a read op */
	writel(frame_start | frame_op |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret) {
		netdev_err(fep->netdev, "MDIO read timeout\n");
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
				  int devad, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	int ret = 0, frame_start, frame_op;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	frame_start = FEC_MMFR_ST_C45;

	/* write address */
	writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
	       FEC_MMFR_TA | (regnum & 0xFFFF),
	       fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret) {
		netdev_err(fep->netdev, "MDIO address write timeout\n");
		goto out;
	}

	frame_op = FEC_MMFR_OP_READ_C45;

	/* start a read op */
	writel(frame_start | frame_op |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
	       FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret) {
		netdev_err(fep->netdev, "MDIO read timeout\n");
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
				   u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	int ret, frame_start, frame_addr;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* C22 write */
	frame_start = FEC_MMFR_ST;
	frame_addr = regnum;

	/* start a write op */
	writel(frame_start | FEC_MMFR_OP_WRITE |
	       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
	       FEC_MMFR_TA | FEC_MMFR_DATA(value),
	       fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	ret = fec_enet_mdio_wait(fep);
	if (ret)
		netdev_err(fep->netdev, "MDIO write timeout\n");

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
2257 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
2258 int devad, int regnum, u16 value)
2260 struct fec_enet_private *fep = bus->priv;
2261 struct device *dev = &fep->pdev->dev;
2262 int ret, frame_start;
2264 ret = pm_runtime_resume_and_get(dev);
2268 frame_start = FEC_MMFR_ST_C45;
2271 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2272 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2273 FEC_MMFR_TA | (regnum & 0xFFFF),
2274 fep->hwp + FEC_MII_DATA);
2276 /* wait for end of transfer */
2277 ret = fec_enet_mdio_wait(fep);
2279 netdev_err(fep->netdev, "MDIO address write timeout\n");
2283 /* start a write op */
2284 writel(frame_start | FEC_MMFR_OP_WRITE |
2285 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2286 FEC_MMFR_TA | FEC_MMFR_DATA(value),
2287 fep->hwp + FEC_MII_DATA);
2289 /* wait for end of transfer */
2290 ret = fec_enet_mdio_wait(fep);
2292 netdev_err(fep->netdev, "MDIO write timeout\n");
2295 pm_runtime_mark_last_busy(dev);
2296 pm_runtime_put_autosuspend(dev);
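/* Unlike C22, every C45 access above is a two-frame sequence: the first
 * frame (FEC_MMFR_OP_ADDR_WRITE) latches the 16-bit register number into
 * the MMD selected by FEC_MMFR_RA(devad), and only the second frame
 * moves data.  A minimal sketch with made-up values (PHY 0, MMD 1,
 * register 0):
 *
 *	writel(FEC_MMFR_ST_C45 | FEC_MMFR_OP_ADDR_WRITE |
 *	       FEC_MMFR_PA(0) | FEC_MMFR_RA(1) | FEC_MMFR_TA | (0 & 0xFFFF),
 *	       fep->hwp + FEC_MII_DATA);
 *	fec_enet_mdio_wait(fep);
 *	writel(FEC_MMFR_ST_C45 | FEC_MMFR_OP_READ_C45 |
 *	       FEC_MMFR_PA(0) | FEC_MMFR_RA(1) | FEC_MMFR_TA,
 *	       fep->hwp + FEC_MII_DATA);
 *	fec_enet_mdio_wait(fep);
 */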
2301 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
2303 struct fec_enet_private *fep = netdev_priv(ndev);
2304 struct phy_device *phy_dev = ndev->phydev;
2307 phy_reset_after_clk_enable(phy_dev);
2308 } else if (fep->phy_node) {
2310 * If the PHY is still not bound to the MAC, but there is an
2311 * OF PHY node and a matching PHY device instance already, use
2312 * the OF PHY node to obtain the PHY device instance, and then
2313 * use that PHY device instance when triggering the PHY reset.
2316 phy_dev = of_phy_find_device(fep->phy_node);
2317 phy_reset_after_clk_enable(phy_dev);
2318 put_device(&phy_dev->mdio.dev);
2322 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
2324 struct fec_enet_private *fep = netdev_priv(ndev);
2328 ret = clk_prepare_enable(fep->clk_enet_out);
2333 mutex_lock(&fep->ptp_clk_mutex);
2334 ret = clk_prepare_enable(fep->clk_ptp);
2336 mutex_unlock(&fep->ptp_clk_mutex);
2337 goto failed_clk_ptp;
2339 fep->ptp_clk_on = true;
2341 mutex_unlock(&fep->ptp_clk_mutex);
2344 ret = clk_prepare_enable(fep->clk_ref);
2346 goto failed_clk_ref;
2348 ret = clk_prepare_enable(fep->clk_2x_txclk);
2350 goto failed_clk_2x_txclk;
2352 fec_enet_phy_reset_after_clk_enable(ndev);
2354 clk_disable_unprepare(fep->clk_enet_out);
2356 mutex_lock(&fep->ptp_clk_mutex);
2357 clk_disable_unprepare(fep->clk_ptp);
2358 fep->ptp_clk_on = false;
2359 mutex_unlock(&fep->ptp_clk_mutex);
2361 clk_disable_unprepare(fep->clk_ref);
2362 clk_disable_unprepare(fep->clk_2x_txclk);
2367 failed_clk_2x_txclk:
2369 clk_disable_unprepare(fep->clk_ref);
2372 mutex_lock(&fep->ptp_clk_mutex);
2373 clk_disable_unprepare(fep->clk_ptp);
2374 fep->ptp_clk_on = false;
2375 mutex_unlock(&fep->ptp_clk_mutex);
2378 clk_disable_unprepare(fep->clk_enet_out);
2383 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
2384 struct device_node *np)
2386 u32 rgmii_tx_delay, rgmii_rx_delay;
2388 /* For rgmii tx internal delay, valid values are 0ps and 2000ps */
2389 if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
2390 if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
2391 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
2392 return -EINVAL;
2393 } else if (rgmii_tx_delay == 2000) {
2394 fep->rgmii_txc_dly = true;
2398 /* For rgmii rx internal delay, valid values are 0ps and 2000ps */
2399 if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
2400 if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
2401 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
2402 return -EINVAL;
2403 } else if (rgmii_rx_delay == 2000) {
2404 fep->rgmii_rxc_dly = true;
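/* For illustration: the device-tree properties consumed above.  A
 * hypothetical board node enabling the 2 ns internal delay in both
 * directions (node label and phy-mode are invented for the example):
 *
 *	&fec1 {
 *		phy-mode = "rgmii-id";
 *		tx-internal-delay-ps = <2000>;
 *		rx-internal-delay-ps = <2000>;
 *	};
 *
 * Any value other than 0 or 2000 is rejected with -EINVAL.
 */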
2411 static int fec_enet_mii_probe(struct net_device *ndev)
2413 struct fec_enet_private *fep = netdev_priv(ndev);
2414 struct phy_device *phy_dev = NULL;
2415 char mdio_bus_id[MII_BUS_ID_SIZE];
2416 char phy_name[MII_BUS_ID_SIZE + 3];
2418 int dev_id = fep->dev_id;
2420 if (fep->phy_node) {
2421 phy_dev = of_phy_connect(ndev, fep->phy_node,
2422 &fec_enet_adjust_link, 0,
2423 fep->phy_interface);
2425 netdev_err(ndev, "Unable to connect to phy\n");
2429 /* check for attached phy */
2430 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
2431 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
2435 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
2439 if (phy_id >= PHY_MAX_ADDR) {
2440 netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
2441 strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
2445 snprintf(phy_name, sizeof(phy_name),
2446 PHY_ID_FMT, mdio_bus_id, phy_id);
2447 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
2448 fep->phy_interface);
2451 if (IS_ERR(phy_dev)) {
2452 netdev_err(ndev, "could not attach to PHY\n");
2453 return PTR_ERR(phy_dev);
2456 /* mask with MAC supported features */
2457 if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2458 phy_set_max_speed(phy_dev, 1000);
2459 phy_remove_link_mode(phy_dev,
2460 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2461 #if !defined(CONFIG_M5272)
2462 phy_support_sym_pause(phy_dev);
2466 phy_set_max_speed(phy_dev, 100);
2468 if (fep->quirks & FEC_QUIRK_HAS_EEE)
2469 phy_support_eee(phy_dev);
2472 fep->full_duplex = 0;
2474 phy_attached_info(phy_dev);
2479 static int fec_enet_mii_init(struct platform_device *pdev)
2481 static struct mii_bus *fec0_mii_bus;
2482 struct net_device *ndev = platform_get_drvdata(pdev);
2483 struct fec_enet_private *fep = netdev_priv(ndev);
2484 bool suppress_preamble = false;
2485 struct phy_device *phydev;
2486 struct device_node *node;
2488 u32 mii_speed, holdtime;
2493 * The i.MX28 dual fec interfaces are not equal.
2494 * Here are the differences:
2496 * - fec0 supports MII & RMII modes while fec1 only supports RMII
2497 * - fec0 acts as the 1588 time master while fec1 is slave
2498 * - external PHYs can only be configured by fec0
2500 * That is to say, fec1 cannot work independently. It only works
2501 * when fec0 is working. The reason behind this design is that the
2502 * second interface is added primarily for Switch mode.
2504 * Because of the last point above, both PHYs are attached to the fec0
2505 * MDIO interface in the board design, and need to be configured via
2506 * the fec0 MII bus.
2508 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2509 /* fec1 uses fec0 mii_bus */
2510 if (mii_cnt && fec0_mii_bus) {
2511 fep->mii_bus = fec0_mii_bus;
2518 bus_freq = 2500000; /* 2.5MHz by default */
2519 node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2521 of_property_read_u32(node, "clock-frequency", &bus_freq);
2522 suppress_preamble = of_property_read_bool(node,
2523 "suppress-preamble");
2527 * Set the MII speed (MDC is derived from clk_get_rate(); see below)
2529 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2530 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
2531 * Reference Manual documents this incorrectly; it is corrected in the i.MX6Q documentation.
2534 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2535 if (fep->quirks & FEC_QUIRK_ENET_MAC)
2536 mii_speed--;
2537 if (mii_speed > 63) {
2539 "fec clock (%lu) too fast to get right mii speed\n",
2540 clk_get_rate(fep->clk_ipg));
2546 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
2547 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2548 * versions are RAZ there, so just ignore the difference and write the
2549 * register always.
2550 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
2551 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2552 * output.
2553 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2554 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2555 * holdtime cannot result in a value greater than 3.
2557 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2559 fep->phy_speed = mii_speed << 1 | holdtime << 8;
2561 if (suppress_preamble)
2562 fep->phy_speed |= BIT(7);
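/* Worked example, assuming a plausible (not guaranteed) 66 MHz ipg
 * clock and the default 2.5 MHz bus frequency:
 *
 *	mii_speed = DIV_ROUND_UP(66000000, 2500000 * 2) = 14
 *	            (ENET-MAC cores then decrement it to 13, since their
 *	            divisor is (MII_SPEED + 1) x 2, giving the same MDC)
 *	holdtime  = DIV_ROUND_UP(66000000, 100000000) - 1 = 0
 *	phy_speed = 14 << 1 | 0 << 8 = 0x1c
 *
 * which yields MDC = 66 MHz / (14 * 2) ~= 2.36 MHz, just below the
 * requested bus frequency.
 */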
2564 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
2565 /* Clear MMFR to avoid generating an MII event when writing MSCR.
2566 * MII event generation condition:
2567 * - writing MSCR:
2568 * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
2569 * mscr_reg_data_in[7:0] != 0
2570 * - writing MMFR:
2571 * - mscr[7:0]_not_zero
2573 writel(0, fep->hwp + FEC_MII_DATA);
2576 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2578 /* Clear any pending transaction complete indication */
2579 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2581 fep->mii_bus = mdiobus_alloc();
2582 if (fep->mii_bus == NULL) {
2587 fep->mii_bus->name = "fec_enet_mii_bus";
2588 fep->mii_bus->read = fec_enet_mdio_read_c22;
2589 fep->mii_bus->write = fec_enet_mdio_write_c22;
2590 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
2591 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
2592 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
2594 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2595 pdev->name, fep->dev_id + 1);
2596 fep->mii_bus->priv = fep;
2597 fep->mii_bus->parent = &pdev->dev;
2599 err = of_mdiobus_register(fep->mii_bus, node);
2601 goto err_out_free_mdiobus;
2604 /* find all the PHY devices on the bus and set mac_managed_pm to true */
2605 for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
2606 phydev = mdiobus_get_phy(fep->mii_bus, addr);
2608 phydev->mac_managed_pm = true;
2613 /* save fec0 mii_bus */
2614 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2615 fec0_mii_bus = fep->mii_bus;
2619 err_out_free_mdiobus:
2620 mdiobus_free(fep->mii_bus);
2626 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2628 if (--mii_cnt == 0) {
2629 mdiobus_unregister(fep->mii_bus);
2630 mdiobus_free(fep->mii_bus);
2634 static void fec_enet_get_drvinfo(struct net_device *ndev,
2635 struct ethtool_drvinfo *info)
2637 struct fec_enet_private *fep = netdev_priv(ndev);
2639 strscpy(info->driver, fep->pdev->dev.driver->name,
2640 sizeof(info->driver));
2641 strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2644 static int fec_enet_get_regs_len(struct net_device *ndev)
2646 struct fec_enet_private *fep = netdev_priv(ndev);
2650 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2652 s = resource_size(r);
2657 /* List of registers that can safely be read to dump them with ethtool */
2658 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2659 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2660 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2661 static __u32 fec_enet_register_version = 2;
2662 static u32 fec_enet_register_offset[] = {
2663 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2664 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2665 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2666 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2667 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2668 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2669 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2670 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2671 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2672 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2673 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2674 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2675 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2676 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2677 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2678 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2679 RMON_T_P_GTE2048, RMON_T_OCTETS,
2680 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2681 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2682 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2683 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2684 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2685 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2686 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2687 RMON_R_P_GTE2048, RMON_R_OCTETS,
2688 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2689 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2692 static u32 fec_enet_register_offset_6ul[] = {
2693 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2694 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2695 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
2696 FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
2697 FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
2698 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2699 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
2700 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2701 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2702 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2703 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2704 RMON_T_P_GTE2048, RMON_T_OCTETS,
2705 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2706 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2707 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2708 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2709 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2710 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2711 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2712 RMON_R_P_GTE2048, RMON_R_OCTETS,
2713 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2714 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2717 static __u32 fec_enet_register_version = 1;
2718 static u32 fec_enet_register_offset[] = {
2719 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2720 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2721 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2722 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2723 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2724 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2725 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2726 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2727 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2731 static void fec_enet_get_regs(struct net_device *ndev,
2732 struct ethtool_regs *regs, void *regbuf)
2734 struct fec_enet_private *fep = netdev_priv(ndev);
2735 u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2736 struct device *dev = &fep->pdev->dev;
2737 u32 *buf = (u32 *)regbuf;
2740 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2741 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2742 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2746 if (!of_machine_is_compatible("fsl,imx6ul")) {
2747 reg_list = fec_enet_register_offset;
2748 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2750 reg_list = fec_enet_register_offset_6ul;
2751 reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
2755 static u32 *reg_list = fec_enet_register_offset;
2756 static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2758 ret = pm_runtime_resume_and_get(dev);
2762 regs->version = fec_enet_register_version;
2764 memset(buf, 0, regs->len);
2766 for (i = 0; i < reg_cnt; i++) {
2769 if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
2770 !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2774 buf[off] = readl(&theregs[off]);
2777 pm_runtime_mark_last_busy(dev);
2778 pm_runtime_put_autosuspend(dev);
2781 static int fec_enet_get_ts_info(struct net_device *ndev,
2782 struct kernel_ethtool_ts_info *info)
2784 struct fec_enet_private *fep = netdev_priv(ndev);
2786 if (fep->bufdesc_ex) {
2788 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2789 SOF_TIMESTAMPING_TX_HARDWARE |
2790 SOF_TIMESTAMPING_RX_HARDWARE |
2791 SOF_TIMESTAMPING_RAW_HARDWARE;
2793 info->phc_index = ptp_clock_index(fep->ptp_clock);
2795 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2796 (1 << HWTSTAMP_TX_ON);
2798 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2799 (1 << HWTSTAMP_FILTER_ALL);
2802 return ethtool_op_get_ts_info(ndev, info);
2806 #if !defined(CONFIG_M5272)
2808 static void fec_enet_get_pauseparam(struct net_device *ndev,
2809 struct ethtool_pauseparam *pause)
2811 struct fec_enet_private *fep = netdev_priv(ndev);
2813 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2814 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2815 pause->rx_pause = pause->tx_pause;
2818 static int fec_enet_set_pauseparam(struct net_device *ndev,
2819 struct ethtool_pauseparam *pause)
2821 struct fec_enet_private *fep = netdev_priv(ndev);
2826 if (pause->tx_pause != pause->rx_pause) {
2828 "hardware only support enable/disable both tx and rx");
2832 fep->pause_flag = 0;
2834 /* tx pause must be the same as rx pause */
2835 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2836 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2838 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2841 if (pause->autoneg) {
2842 if (netif_running(ndev))
2844 phy_start_aneg(ndev->phydev);
2846 if (netif_running(ndev)) {
2847 napi_disable(&fep->napi);
2848 netif_tx_lock_bh(ndev);
2850 netif_tx_wake_all_queues(ndev);
2851 netif_tx_unlock_bh(ndev);
2852 napi_enable(&fep->napi);
2858 static const struct fec_stat {
2859 char name[ETH_GSTRING_LEN];
2863 { "tx_dropped", RMON_T_DROP },
2864 { "tx_packets", RMON_T_PACKETS },
2865 { "tx_broadcast", RMON_T_BC_PKT },
2866 { "tx_multicast", RMON_T_MC_PKT },
2867 { "tx_crc_errors", RMON_T_CRC_ALIGN },
2868 { "tx_undersize", RMON_T_UNDERSIZE },
2869 { "tx_oversize", RMON_T_OVERSIZE },
2870 { "tx_fragment", RMON_T_FRAG },
2871 { "tx_jabber", RMON_T_JAB },
2872 { "tx_collision", RMON_T_COL },
2873 { "tx_64byte", RMON_T_P64 },
2874 { "tx_65to127byte", RMON_T_P65TO127 },
2875 { "tx_128to255byte", RMON_T_P128TO255 },
2876 { "tx_256to511byte", RMON_T_P256TO511 },
2877 { "tx_512to1023byte", RMON_T_P512TO1023 },
2878 { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2879 { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2880 { "tx_octets", RMON_T_OCTETS },
2883 { "IEEE_tx_drop", IEEE_T_DROP },
2884 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2885 { "IEEE_tx_1col", IEEE_T_1COL },
2886 { "IEEE_tx_mcol", IEEE_T_MCOL },
2887 { "IEEE_tx_def", IEEE_T_DEF },
2888 { "IEEE_tx_lcol", IEEE_T_LCOL },
2889 { "IEEE_tx_excol", IEEE_T_EXCOL },
2890 { "IEEE_tx_macerr", IEEE_T_MACERR },
2891 { "IEEE_tx_cserr", IEEE_T_CSERR },
2892 { "IEEE_tx_sqe", IEEE_T_SQE },
2893 { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2894 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2897 { "rx_packets", RMON_R_PACKETS },
2898 { "rx_broadcast", RMON_R_BC_PKT },
2899 { "rx_multicast", RMON_R_MC_PKT },
2900 { "rx_crc_errors", RMON_R_CRC_ALIGN },
2901 { "rx_undersize", RMON_R_UNDERSIZE },
2902 { "rx_oversize", RMON_R_OVERSIZE },
2903 { "rx_fragment", RMON_R_FRAG },
2904 { "rx_jabber", RMON_R_JAB },
2905 { "rx_64byte", RMON_R_P64 },
2906 { "rx_65to127byte", RMON_R_P65TO127 },
2907 { "rx_128to255byte", RMON_R_P128TO255 },
2908 { "rx_256to511byte", RMON_R_P256TO511 },
2909 { "rx_512to1023byte", RMON_R_P512TO1023 },
2910 { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2911 { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2912 { "rx_octets", RMON_R_OCTETS },
2915 { "IEEE_rx_drop", IEEE_R_DROP },
2916 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2917 { "IEEE_rx_crc", IEEE_R_CRC },
2918 { "IEEE_rx_align", IEEE_R_ALIGN },
2919 { "IEEE_rx_macerr", IEEE_R_MACERR },
2920 { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2921 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2924 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
2926 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
2927 "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */
2928 "rx_xdp_pass", /* RX_XDP_PASS, */
2929 "rx_xdp_drop", /* RX_XDP_DROP, */
2930 "rx_xdp_tx", /* RX_XDP_TX, */
2931 "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */
2932 "tx_xdp_xmit", /* TX_XDP_XMIT, */
2933 "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */
2936 static void fec_enet_update_ethtool_stats(struct net_device *dev)
2938 struct fec_enet_private *fep = netdev_priv(dev);
2941 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2942 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2945 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
2947 u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
2948 struct fec_enet_priv_rx_q *rxq;
2951 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2952 rxq = fep->rx_queue[i];
2954 for (j = 0; j < XDP_STATS_TOTAL; j++)
2955 xdp_stats[j] += rxq->stats[j];
2958 memcpy(data, xdp_stats, sizeof(xdp_stats));
2961 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
2963 #ifdef CONFIG_PAGE_POOL_STATS
2964 struct page_pool_stats stats = {};
2965 struct fec_enet_priv_rx_q *rxq;
2968 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2969 rxq = fep->rx_queue[i];
2971 if (!rxq->page_pool)
2974 page_pool_get_stats(rxq->page_pool, &stats);
2977 page_pool_ethtool_stats_get(data, &stats);
2981 static void fec_enet_get_ethtool_stats(struct net_device *dev,
2982 struct ethtool_stats *stats, u64 *data)
2984 struct fec_enet_private *fep = netdev_priv(dev);
2986 if (netif_running(dev))
2987 fec_enet_update_ethtool_stats(dev);
2989 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2990 data += FEC_STATS_SIZE / sizeof(u64);
2992 fec_enet_get_xdp_stats(fep, data);
2993 data += XDP_STATS_TOTAL;
2995 fec_enet_page_pool_stats(fep, data);
2998 static void fec_enet_get_strings(struct net_device *netdev,
2999 u32 stringset, u8 *data)
3002 switch (stringset) {
3004 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
3005 ethtool_puts(&data, fec_stats[i].name);
3007 for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
3008 ethtool_puts(&data, fec_xdp_stat_strs[i]);
3010 page_pool_ethtool_stats_get_strings(data);
3014 net_selftest_get_strings(data);
3019 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
3025 count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
3026 count += page_pool_ethtool_stats_get_count();
3030 return net_selftest_get_count();
3036 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
3038 struct fec_enet_private *fep = netdev_priv(dev);
3039 struct fec_enet_priv_rx_q *rxq;
3042 /* Disable MIB statistics counters */
3043 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
3045 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
3046 writel(0, fep->hwp + fec_stats[i].offset);
3048 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
3049 rxq = fep->rx_queue[i];
3050 for (j = 0; j < XDP_STATS_TOTAL; j++)
3054 /* Don't disable MIB statistics counters */
3055 writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
3058 #else /* !defined(CONFIG_M5272) */
3059 #define FEC_STATS_SIZE 0
3060 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
3064 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
3067 #endif /* !defined(CONFIG_M5272) */
3069 /* ITR clock source is enet system clock (clk_ahb).
3070 * The ICTT time unit is 64 clock cycles, i.e. cycle_ns * 64 ns,
3071 * so the ICTT value = X us / (cycle_ns * 64)
3073 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
3075 struct fec_enet_private *fep = netdev_priv(ndev);
3077 return us * (fep->itr_clk_rate / 64000) / 1000;
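/* Worked example: with a hypothetical 66 MHz AHB clock,
 * itr_clk_rate / 64000 = 1031, so a request of 100 us becomes
 * 100 * 1031 / 1000 = 103 ICTT units, i.e. 103 * 64 cycles at 66 MHz,
 * roughly 99.9 us.
 */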
3080 /* Set threshold for interrupt coalescing */
3081 static void fec_enet_itr_coal_set(struct net_device *ndev)
3083 struct fec_enet_private *fep = netdev_priv(ndev);
3086 /* Must be greater than zero to avoid unpredictable behavior */
3087 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
3088 !fep->tx_time_itr || !fep->tx_pkts_itr)
3089 return;
3091 /* Select enet system clock as Interrupt Coalescing
3092 * timer Clock Source
3094 rx_itr = FEC_ITR_CLK_SEL;
3095 tx_itr = FEC_ITR_CLK_SEL;
3097 /* set ICFT and ICTT */
3098 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
3099 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
3100 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
3101 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
3103 rx_itr |= FEC_ITR_EN;
3104 tx_itr |= FEC_ITR_EN;
3106 writel(tx_itr, fep->hwp + FEC_TXIC0);
3107 writel(rx_itr, fep->hwp + FEC_RXIC0);
3108 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
3109 writel(tx_itr, fep->hwp + FEC_TXIC1);
3110 writel(rx_itr, fep->hwp + FEC_RXIC1);
3111 writel(tx_itr, fep->hwp + FEC_TXIC2);
3112 writel(rx_itr, fep->hwp + FEC_RXIC2);
3116 static int fec_enet_get_coalesce(struct net_device *ndev,
3117 struct ethtool_coalesce *ec,
3118 struct kernel_ethtool_coalesce *kernel_coal,
3119 struct netlink_ext_ack *extack)
3121 struct fec_enet_private *fep = netdev_priv(ndev);
3123 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3126 ec->rx_coalesce_usecs = fep->rx_time_itr;
3127 ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
3129 ec->tx_coalesce_usecs = fep->tx_time_itr;
3130 ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
3135 static int fec_enet_set_coalesce(struct net_device *ndev,
3136 struct ethtool_coalesce *ec,
3137 struct kernel_ethtool_coalesce *kernel_coal,
3138 struct netlink_ext_ack *extack)
3140 struct fec_enet_private *fep = netdev_priv(ndev);
3141 struct device *dev = &fep->pdev->dev;
3144 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3147 if (ec->rx_max_coalesced_frames > 255) {
3148 dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
3152 if (ec->tx_max_coalesced_frames > 255) {
3153 dev_err(dev, "Tx coalesced frame exceed hardware limitation\n");
3157 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
3158 if (cycle > 0xFFFF) {
3159 dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
3163 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
3164 if (cycle > 0xFFFF) {
3165 dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
3169 fep->rx_time_itr = ec->rx_coalesce_usecs;
3170 fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
3172 fep->tx_time_itr = ec->tx_coalesce_usecs;
3173 fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
3175 fec_enet_itr_coal_set(ndev);
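/* Usage note: the thresholds above map to the standard ethtool
 * coalescing knobs, e.g. (interface name is a placeholder):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-usecs 100 tx-frames 32
 *
 * Frame counts are limited to 255 and the usec values to whatever fits
 * the 16-bit ICTT field after conversion, as checked above.
 */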
3181 fec_enet_get_eee(struct net_device *ndev, struct ethtool_keee *edata)
3183 struct fec_enet_private *fep = netdev_priv(ndev);
3184 struct ethtool_keee *p = &fep->eee;
3186 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3189 if (!netif_running(ndev))
3192 edata->tx_lpi_timer = p->tx_lpi_timer;
3194 return phy_ethtool_get_eee(ndev->phydev, edata);
3198 fec_enet_set_eee(struct net_device *ndev, struct ethtool_keee *edata)
3200 struct fec_enet_private *fep = netdev_priv(ndev);
3201 struct ethtool_keee *p = &fep->eee;
3203 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3206 if (!netif_running(ndev))
3209 p->tx_lpi_timer = edata->tx_lpi_timer;
3211 return phy_ethtool_set_eee(ndev->phydev, edata);
3215 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3217 struct fec_enet_private *fep = netdev_priv(ndev);
3219 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
3220 wol->supported = WAKE_MAGIC;
3221 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
3223 wol->supported = wol->wolopts = 0;
3228 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3230 struct fec_enet_private *fep = netdev_priv(ndev);
3232 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
3235 if (wol->wolopts & ~WAKE_MAGIC)
3238 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
3239 if (device_may_wakeup(&ndev->dev))
3240 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
3242 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
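/* Usage note (interface name is a placeholder): magic-packet wakeup
 * can be toggled from userspace with
 *
 *	ethtool -s eth0 wol g
 *	ethtool -s eth0 wol d
 *
 * Any option other than WAKE_MAGIC is rejected above.
 */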
3247 static const struct ethtool_ops fec_enet_ethtool_ops = {
3248 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3249 ETHTOOL_COALESCE_MAX_FRAMES,
3250 .get_drvinfo = fec_enet_get_drvinfo,
3251 .get_regs_len = fec_enet_get_regs_len,
3252 .get_regs = fec_enet_get_regs,
3253 .nway_reset = phy_ethtool_nway_reset,
3254 .get_link = ethtool_op_get_link,
3255 .get_coalesce = fec_enet_get_coalesce,
3256 .set_coalesce = fec_enet_set_coalesce,
3257 #ifndef CONFIG_M5272
3258 .get_pauseparam = fec_enet_get_pauseparam,
3259 .set_pauseparam = fec_enet_set_pauseparam,
3260 .get_strings = fec_enet_get_strings,
3261 .get_ethtool_stats = fec_enet_get_ethtool_stats,
3262 .get_sset_count = fec_enet_get_sset_count,
3264 .get_ts_info = fec_enet_get_ts_info,
3265 .get_wol = fec_enet_get_wol,
3266 .set_wol = fec_enet_set_wol,
3267 .get_eee = fec_enet_get_eee,
3268 .set_eee = fec_enet_set_eee,
3269 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3270 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3271 .self_test = net_selftest,
3274 static void fec_enet_free_buffers(struct net_device *ndev)
3276 struct fec_enet_private *fep = netdev_priv(ndev);
3278 struct fec_enet_priv_tx_q *txq;
3279 struct fec_enet_priv_rx_q *rxq;
3282 for (q = 0; q < fep->num_rx_queues; q++) {
3283 rxq = fep->rx_queue[q];
3284 for (i = 0; i < rxq->bd.ring_size; i++)
3285 page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
3287 for (i = 0; i < XDP_STATS_TOTAL; i++)
3290 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
3291 xdp_rxq_info_unreg(&rxq->xdp_rxq);
3292 page_pool_destroy(rxq->page_pool);
3293 rxq->page_pool = NULL;
3296 for (q = 0; q < fep->num_tx_queues; q++) {
3297 txq = fep->tx_queue[q];
3298 for (i = 0; i < txq->bd.ring_size; i++) {
3299 kfree(txq->tx_bounce[i]);
3300 txq->tx_bounce[i] = NULL;
3302 if (!txq->tx_buf[i].buf_p) {
3303 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3307 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
3308 dev_kfree_skb(txq->tx_buf[i].buf_p);
3309 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
3310 xdp_return_frame(txq->tx_buf[i].buf_p);
3312 struct page *page = txq->tx_buf[i].buf_p;
3314 page_pool_put_page(page->pp, page, 0, false);
3317 txq->tx_buf[i].buf_p = NULL;
3318 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3323 static void fec_enet_free_queue(struct net_device *ndev)
3325 struct fec_enet_private *fep = netdev_priv(ndev);
3327 struct fec_enet_priv_tx_q *txq;
3329 for (i = 0; i < fep->num_tx_queues; i++)
3330 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
3331 txq = fep->tx_queue[i];
3332 fec_dma_free(&fep->pdev->dev,
3333 txq->bd.ring_size * TSO_HEADER_SIZE,
3334 txq->tso_hdrs, txq->tso_hdrs_dma);
3337 for (i = 0; i < fep->num_rx_queues; i++)
3338 kfree(fep->rx_queue[i]);
3339 for (i = 0; i < fep->num_tx_queues; i++)
3340 kfree(fep->tx_queue[i]);
3343 static int fec_enet_alloc_queue(struct net_device *ndev)
3345 struct fec_enet_private *fep = netdev_priv(ndev);
3348 struct fec_enet_priv_tx_q *txq;
3350 for (i = 0; i < fep->num_tx_queues; i++) {
3351 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
3357 fep->tx_queue[i] = txq;
3358 txq->bd.ring_size = TX_RING_SIZE;
3359 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
3361 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
3362 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
3364 txq->tso_hdrs = fec_dma_alloc(&fep->pdev->dev,
3365 txq->bd.ring_size * TSO_HEADER_SIZE,
3366 &txq->tso_hdrs_dma, GFP_KERNEL);
3367 if (!txq->tso_hdrs) {
3373 for (i = 0; i < fep->num_rx_queues; i++) {
3374 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
3376 if (!fep->rx_queue[i]) {
3381 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
3382 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
3387 fec_enet_free_queue(ndev);
3392 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3394 struct fec_enet_private *fep = netdev_priv(ndev);
3395 struct fec_enet_priv_rx_q *rxq;
3396 dma_addr_t phys_addr;
3397 struct bufdesc *bdp;
3401 rxq = fep->rx_queue[queue];
3404 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
3406 netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
3410 for (i = 0; i < rxq->bd.ring_size; i++) {
3411 page = page_pool_dev_alloc_pages(rxq->page_pool);
3415 phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
3416 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
3418 rxq->rx_skb_info[i].page = page;
3419 rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
3420 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
3422 if (fep->bufdesc_ex) {
3423 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3424 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
3427 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
3430 /* Set the last buffer to wrap. */
3431 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
3432 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3436 fec_enet_free_buffers(ndev);
3441 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
3443 struct fec_enet_private *fep = netdev_priv(ndev);
3445 struct bufdesc *bdp;
3446 struct fec_enet_priv_tx_q *txq;
3448 txq = fep->tx_queue[queue];
3450 for (i = 0; i < txq->bd.ring_size; i++) {
3451 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
3452 if (!txq->tx_bounce[i])
3455 bdp->cbd_sc = cpu_to_fec16(0);
3456 bdp->cbd_bufaddr = cpu_to_fec32(0);
3458 if (fep->bufdesc_ex) {
3459 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3460 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
3463 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3466 /* Set the last buffer to wrap. */
3467 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
3468 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3473 fec_enet_free_buffers(ndev);
3477 static int fec_enet_alloc_buffers(struct net_device *ndev)
3479 struct fec_enet_private *fep = netdev_priv(ndev);
3482 for (i = 0; i < fep->num_rx_queues; i++)
3483 if (fec_enet_alloc_rxq_buffers(ndev, i))
3486 for (i = 0; i < fep->num_tx_queues; i++)
3487 if (fec_enet_alloc_txq_buffers(ndev, i))
3493 fec_enet_open(struct net_device *ndev)
3495 struct fec_enet_private *fep = netdev_priv(ndev);
3499 ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3503 pinctrl_pm_select_default_state(&fep->pdev->dev);
3504 ret = fec_enet_clk_enable(ndev, true);
3508 /* During the first fec_enet_open call the PHY is not yet probed at
3509 * this point. Therefore the phy_reset_after_clk_enable() call within
3510 * fec_enet_clk_enable() fails. As we need this reset to be sure the
3511 * PHY is working correctly, we check whether we need to reset again
3512 * later, once the PHY has been probed.
3514 if (ndev->phydev && ndev->phydev->drv)
3515 reset_again = false;
3519 /* I should reset the ring buffers here, but I don't yet know
3520 * a simple way to do that.
3523 ret = fec_enet_alloc_buffers(ndev);
3525 goto err_enet_alloc;
3527 /* Init MAC prior to mii bus probe */
3530 /* Call phy_reset_after_clk_enable() again if the first attempt (from
3531 * fec_enet_clk_enable() above) failed because the PHY was not yet probed.
3534 fec_enet_phy_reset_after_clk_enable(ndev);
3536 /* Probe and connect to PHY when open the interface */
3537 ret = fec_enet_mii_probe(ndev);
3539 goto err_enet_mii_probe;
3541 if (fep->quirks & FEC_QUIRK_ERR006687)
3542 imx6q_cpuidle_fec_irqs_used();
3544 if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3545 cpu_latency_qos_add_request(&fep->pm_qos_req, 0);
3547 napi_enable(&fep->napi);
3548 phy_start(ndev->phydev);
3549 netif_tx_start_all_queues(ndev);
3551 device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
3552 FEC_WOL_FLAG_ENABLE);
3557 fec_enet_free_buffers(ndev);
3559 fec_enet_clk_enable(ndev, false);
3561 pm_runtime_mark_last_busy(&fep->pdev->dev);
3562 pm_runtime_put_autosuspend(&fep->pdev->dev);
3563 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3568 fec_enet_close(struct net_device *ndev)
3570 struct fec_enet_private *fep = netdev_priv(ndev);
3572 phy_stop(ndev->phydev);
3574 if (netif_device_present(ndev)) {
3575 napi_disable(&fep->napi);
3576 netif_tx_disable(ndev);
3580 phy_disconnect(ndev->phydev);
3582 if (fep->quirks & FEC_QUIRK_ERR006687)
3583 imx6q_cpuidle_fec_irqs_unused();
3585 fec_enet_update_ethtool_stats(ndev);
3587 fec_enet_clk_enable(ndev, false);
3588 if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
3589 cpu_latency_qos_remove_request(&fep->pm_qos_req);
3591 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3592 pm_runtime_mark_last_busy(&fep->pdev->dev);
3593 pm_runtime_put_autosuspend(&fep->pdev->dev);
3595 fec_enet_free_buffers(ndev);
3600 /* Set or clear the multicast filter for this adaptor.
3601 * Skeleton taken from sunlance driver.
3602 * The CPM Ethernet implementation allows Multicast as well as individual
3603 * MAC address filtering. Some of the drivers check to make sure it is
3604 * a group multicast address, and discard those that are not. I guess I
3605 * will do the same for now, but just remove the test if you want
3606 * individual filtering as well (do the upper net layers want or support
3607 * this kind of feature?).
3610 #define FEC_HASH_BITS 6 /* #bits in hash */
3612 static void set_multicast_list(struct net_device *ndev)
3614 struct fec_enet_private *fep = netdev_priv(ndev);
3615 struct netdev_hw_addr *ha;
3616 unsigned int crc, tmp;
3618 unsigned int hash_high = 0, hash_low = 0;
3620 if (ndev->flags & IFF_PROMISC) {
3621 tmp = readl(fep->hwp + FEC_R_CNTRL);
3623 writel(tmp, fep->hwp + FEC_R_CNTRL);
3627 tmp = readl(fep->hwp + FEC_R_CNTRL);
3629 writel(tmp, fep->hwp + FEC_R_CNTRL);
3631 if (ndev->flags & IFF_ALLMULTI) {
3632 /* Catch all multicast addresses, so set the
3635 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3636 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
3641 /* Add the addresses in hash register */
3642 netdev_for_each_mc_addr(ha, ndev) {
3643 /* calculate crc32 value of mac address */
3644 crc = ether_crc_le(ndev->addr_len, ha->addr);
3646 /* only the upper 6 bits (FEC_HASH_BITS) are used;
3647 * they select a specific bit in the hash registers
3649 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
3652 hash_high |= 1 << (hash - 32);
3654 hash_low |= 1 << hash;
3657 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
3658 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
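/* Worked example: ether_crc_le() of the multicast MAC yields a 32-bit
 * CRC, of which only the top FEC_HASH_BITS (6) bits survive, selecting
 * one of 64 filter bits.  A hash of, say, 37 sets bit 37 - 32 = 5 in
 * GRP_HASH_TABLE_HIGH, while a hash of 12 sets bit 12 in
 * GRP_HASH_TABLE_LOW.
 */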
3661 /* Set a MAC change in hardware. */
3663 fec_set_mac_address(struct net_device *ndev, void *p)
3665 struct fec_enet_private *fep = netdev_priv(ndev);
3666 struct sockaddr *addr = p;
3669 if (!is_valid_ether_addr(addr->sa_data))
3670 return -EADDRNOTAVAIL;
3671 eth_hw_addr_set(ndev, addr->sa_data);
3674 /* Add a netif status check here to avoid a system hang in this case:
3675 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
3676 * After ethx is down, all FEC clocks are gated off; a subsequent
3677 * register access would then hang the system.
3679 if (!netif_running(ndev))
3682 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
3683 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
3684 fep->hwp + FEC_ADDR_LOW);
3685 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
3686 fep->hwp + FEC_ADDR_HIGH);
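/* Worked example: for a made-up address 00:11:22:33:44:55 the two
 * writes above produce
 *	FEC_ADDR_LOW  = 0x00112233	(bytes 0..3, byte 0 in the MSB)
 *	FEC_ADDR_HIGH = 0x44550000	(bytes 4..5 in the upper half)
 */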
3690 static inline void fec_enet_set_netdev_features(struct net_device *netdev,
3691 netdev_features_t features)
3693 struct fec_enet_private *fep = netdev_priv(netdev);
3694 netdev_features_t changed = features ^ netdev->features;
3696 netdev->features = features;
3698 /* Receive checksum has been changed */
3699 if (changed & NETIF_F_RXCSUM) {
3700 if (features & NETIF_F_RXCSUM)
3701 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
3703 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
3707 static int fec_set_features(struct net_device *netdev,
3708 netdev_features_t features)
3710 struct fec_enet_private *fep = netdev_priv(netdev);
3711 netdev_features_t changed = features ^ netdev->features;
3713 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
3714 napi_disable(&fep->napi);
3715 netif_tx_lock_bh(netdev);
3717 fec_enet_set_netdev_features(netdev, features);
3718 fec_restart(netdev);
3719 netif_tx_wake_all_queues(netdev);
3720 netif_tx_unlock_bh(netdev);
3721 napi_enable(&fep->napi);
3723 fec_enet_set_netdev_features(netdev, features);
3729 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
3730 struct net_device *sb_dev)
3732 struct fec_enet_private *fep = netdev_priv(ndev);
3735 if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
3736 return netdev_pick_tx(ndev, skb, NULL);
3738 /* VLAN is present in the payload. */
3739 if (eth_type_vlan(skb->protocol)) {
3740 struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);
3742 vlan_tag = ntohs(vhdr->h_vlan_TCI);
3743 /* VLAN is present in the skb but not yet pushed in the payload. */
3744 } else if (skb_vlan_tag_present(skb)) {
3745 vlan_tag = skb->vlan_tci;
3750 return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
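/* Worked example: a TCI of 0xa005 carries PCP = 0xa005 >> 13 = 5, so
 * the frame is steered to fec_enet_vlan_pri_to_queue[5]; untagged
 * traffic leaves vlan_tag at 0 and lands in
 * fec_enet_vlan_pri_to_queue[0].
 */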
3753 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
3755 struct fec_enet_private *fep = netdev_priv(dev);
3756 bool is_run = netif_running(dev);
3757 struct bpf_prog *old_prog;
3759 switch (bpf->command) {
3760 case XDP_SETUP_PROG:
3761 /* There is no need to support the SoCs that require frame
3762 * swapping, because XDP performance there wouldn't be better
3763 * than skb mode.
3765 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
3769 xdp_features_clear_redirect_target(dev);
3772 napi_disable(&fep->napi);
3773 netif_tx_disable(dev);
3776 old_prog = xchg(&fep->xdp_prog, bpf->prog);
3778 bpf_prog_put(old_prog);
3783 napi_enable(&fep->napi);
3784 netif_tx_start_all_queues(dev);
3788 xdp_features_set_redirect_target(dev, false);
3792 case XDP_SETUP_XSK_POOL:
3801 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
3803 if (unlikely(index < 0))
3806 return (index % fep->num_tx_queues);
3809 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
3810 struct fec_enet_priv_tx_q *txq,
3811 void *frame, u32 dma_sync_len,
3814 unsigned int index, status, estatus;
3815 struct bufdesc *bdp;
3816 dma_addr_t dma_addr;
3820 entries_free = fec_enet_get_free_txdesc_num(txq);
3821 if (entries_free < MAX_SKB_FRAGS + 1) {
3822 netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
3826 /* Fill in a Tx ring entry */
3828 status = fec16_to_cpu(bdp->cbd_sc);
3829 status &= ~BD_ENET_TX_STATS;
3831 index = fec_enet_get_bd_index(bdp, &txq->bd);
3834 struct xdp_frame *xdpf = frame;
3836 dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
3837 xdpf->len, DMA_TO_DEVICE);
3838 if (dma_mapping_error(&fep->pdev->dev, dma_addr))
3841 frame_len = xdpf->len;
3842 txq->tx_buf[index].buf_p = xdpf;
3843 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
3845 struct xdp_buff *xdpb = frame;
3848 page = virt_to_page(xdpb->data);
3849 dma_addr = page_pool_get_dma_addr(page) +
3850 (xdpb->data - xdpb->data_hard_start);
3851 dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
3852 dma_sync_len, DMA_BIDIRECTIONAL);
3853 frame_len = xdpb->data_end - xdpb->data;
3854 txq->tx_buf[index].buf_p = page;
3855 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
3858 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
3859 if (fep->bufdesc_ex)
3860 estatus = BD_ENET_TX_INT;
3862 bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
3863 bdp->cbd_datlen = cpu_to_fec16(frame_len);
3865 if (fep->bufdesc_ex) {
3866 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3868 if (fep->quirks & FEC_QUIRK_HAS_AVB)
3869 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
3872 ebdp->cbd_esc = cpu_to_fec32(estatus);
3875 /* Make sure the updates to the rest of the descriptor are performed before
3876 * transferring ownership.
3880 /* Send it on its way. Tell FEC it's ready, interrupt when done,
3881 * it's the last BD of the frame, and to put the CRC on the end.
3883 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
3884 bdp->cbd_sc = cpu_to_fec16(status);
3886 /* If this was the last BD in the ring, start at the beginning again. */
3887 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3889 /* Make sure the updates to bdp are performed before txq->bd.cur is advanced. */
3894 /* Trigger transmission start */
3895 writel(0, txq->bd.reg_desc_active);
3900 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
3901 int cpu, struct xdp_buff *xdp,
3904 struct fec_enet_priv_tx_q *txq;
3905 struct netdev_queue *nq;
3908 queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3909 txq = fep->tx_queue[queue];
3910 nq = netdev_get_tx_queue(fep->netdev, queue);
3912 __netif_tx_lock(nq, cpu);
3914 /* Avoid tx timeout as XDP shares the queue with the kernel stack */
3915 txq_trans_cond_update(nq);
3916 ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);
3918 __netif_tx_unlock(nq);
3923 static int fec_enet_xdp_xmit(struct net_device *dev,
3925 struct xdp_frame **frames,
3928 struct fec_enet_private *fep = netdev_priv(dev);
3929 struct fec_enet_priv_tx_q *txq;
3930 int cpu = smp_processor_id();
3931 unsigned int sent_frames = 0;
3932 struct netdev_queue *nq;
3936 queue = fec_enet_xdp_get_tx_queue(fep, cpu);
3937 txq = fep->tx_queue[queue];
3938 nq = netdev_get_tx_queue(fep->netdev, queue);
3940 __netif_tx_lock(nq, cpu);
3942 /* Avoid tx timeout as XDP shares the queue with the kernel stack */
3943 txq_trans_cond_update(nq);
3944 for (i = 0; i < num_frames; i++) {
3945 if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
3950 __netif_tx_unlock(nq);
3955 static int fec_hwtstamp_get(struct net_device *ndev,
3956 struct kernel_hwtstamp_config *config)
3958 struct fec_enet_private *fep = netdev_priv(ndev);
3960 if (!netif_running(ndev))
3963 if (!fep->bufdesc_ex)
3966 fec_ptp_get(ndev, config);
3971 static int fec_hwtstamp_set(struct net_device *ndev,
3972 struct kernel_hwtstamp_config *config,
3973 struct netlink_ext_ack *extack)
3975 struct fec_enet_private *fep = netdev_priv(ndev);
3977 if (!netif_running(ndev))
3980 if (!fep->bufdesc_ex)
3983 return fec_ptp_set(ndev, config, extack);
3986 static const struct net_device_ops fec_netdev_ops = {
3987 .ndo_open = fec_enet_open,
3988 .ndo_stop = fec_enet_close,
3989 .ndo_start_xmit = fec_enet_start_xmit,
3990 .ndo_select_queue = fec_enet_select_queue,
3991 .ndo_set_rx_mode = set_multicast_list,
3992 .ndo_validate_addr = eth_validate_addr,
3993 .ndo_tx_timeout = fec_timeout,
3994 .ndo_set_mac_address = fec_set_mac_address,
3995 .ndo_eth_ioctl = phy_do_ioctl_running,
3996 .ndo_set_features = fec_set_features,
3997 .ndo_bpf = fec_enet_bpf,
3998 .ndo_xdp_xmit = fec_enet_xdp_xmit,
3999 .ndo_hwtstamp_get = fec_hwtstamp_get,
4000 .ndo_hwtstamp_set = fec_hwtstamp_set,
4003 static const unsigned short offset_des_active_rxq[] = {
4004 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
4007 static const unsigned short offset_des_active_txq[] = {
4008 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
4012 * XXX: We need to clean up on failure exits here.
4015 static int fec_enet_init(struct net_device *ndev)
4017 struct fec_enet_private *fep = netdev_priv(ndev);
4018 struct bufdesc *cbd_base;
4022 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
4023 sizeof(struct bufdesc);
4024 unsigned dsize_log2 = __fls(dsize);
4027 WARN_ON(dsize != (1 << dsize_log2));
4028 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4029 fep->rx_align = 0xf;
4030 fep->tx_align = 0xf;
4032 fep->rx_align = 0x3;
4033 fep->tx_align = 0x3;
4035 fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4036 fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
4037 fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
4038 fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;
4040 /* Check mask of the streaming and coherent API */
4041 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
4043 dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
4047 ret = fec_enet_alloc_queue(ndev);
4051 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
4053 /* Allocate memory for buffer descriptors. */
4054 cbd_base = fec_dmam_alloc(&fep->pdev->dev, bd_size, &bd_dma,
4058 goto free_queue_mem;
4061 /* Get the Ethernet address */
4062 ret = fec_get_mac(ndev);
4064 goto free_queue_mem;
4066 /* Set receive and transmit descriptor base. */
4067 for (i = 0; i < fep->num_rx_queues; i++) {
4068 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
4069 unsigned size = dsize * rxq->bd.ring_size;
4072 rxq->bd.base = cbd_base;
4073 rxq->bd.cur = cbd_base;
4074 rxq->bd.dma = bd_dma;
4075 rxq->bd.dsize = dsize;
4076 rxq->bd.dsize_log2 = dsize_log2;
4077 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
4079 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4080 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4083 for (i = 0; i < fep->num_tx_queues; i++) {
4084 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
4085 unsigned size = dsize * txq->bd.ring_size;
4088 txq->bd.base = cbd_base;
4089 txq->bd.cur = cbd_base;
4090 txq->bd.dma = bd_dma;
4091 txq->bd.dsize = dsize;
4092 txq->bd.dsize_log2 = dsize_log2;
4093 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
4095 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
4096 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
4100 /* The FEC Ethernet specific entries in the device structure */
4101 ndev->watchdog_timeo = TX_TIMEOUT;
4102 ndev->netdev_ops = &fec_netdev_ops;
4103 ndev->ethtool_ops = &fec_enet_ethtool_ops;
4105 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
4106 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
4108 if (fep->quirks & FEC_QUIRK_HAS_VLAN)
4109 /* enable hw VLAN support */
4110 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
4112 if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
4113 netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);
4115 /* enable hw accelerator */
4116 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
4117 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
4118 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
4121 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
4123 fep->rx_align = 0x3f;
4126 ndev->hw_features = ndev->features;
4128 if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
4129 ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
4130 NETDEV_XDP_ACT_REDIRECT;
4134 if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
4135 fec_enet_clear_ethtool_stats(ndev);
4137 fec_enet_update_ethtool_stats(ndev);
4142 fec_enet_free_queue(ndev);
4146 static void fec_enet_deinit(struct net_device *ndev)
4148 struct fec_enet_private *fep = netdev_priv(ndev);
4150 netif_napi_del(&fep->napi);
4151 fec_enet_free_queue(ndev);
4155 static int fec_reset_phy(struct platform_device *pdev)
4157 struct gpio_desc *phy_reset;
4158 int msec = 1, phy_post_delay = 0;
4159 struct device_node *np = pdev->dev.of_node;
4165 err = of_property_read_u32(np, "phy-reset-duration", &msec);
4166 /* A sane reset duration should not be longer than 1s */
4167 if (!err && msec > 1000)
4170 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
4171 /* a valid reset post-delay should be less than 1s */
4172 if (!err && phy_post_delay > 1000)
4175 phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
4177 if (IS_ERR(phy_reset))
4178 return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
4179 "failed to get phy-reset-gpios\n");
4187 usleep_range(msec * 1000, msec * 1000 + 1000);
4189 gpiod_set_value_cansleep(phy_reset, 0);
4191 if (!phy_post_delay)
4194 if (phy_post_delay > 20)
4195 msleep(phy_post_delay);
4197 usleep_range(phy_post_delay * 1000,
4198 phy_post_delay * 1000 + 1000);
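/* For illustration: the device-tree properties consumed above, with
 * invented GPIO and timing values:
 *
 *	&fec1 {
 *		phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 *		phy-reset-duration = <10>;
 *		phy-reset-post-delay = <100>;
 *	};
 *
 * Both values are in milliseconds and are bounded to at most 1000 by
 * the checks above.
 */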
4202 #else /* CONFIG_OF */
4203 static int fec_reset_phy(struct platform_device *pdev)
4206 * In case of platform probe, the reset has been done
4207 * by the platform code.
4211 #endif /* CONFIG_OF */
4214 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
4216 struct device_node *np = pdev->dev.of_node;
4218 *num_tx = *num_rx = 1;
4220 if (!np || !of_device_is_available(np))
4223 /* parse the number of tx and rx queues */
4224 of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
4226 of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
4228 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
4229 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
4235 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
4236 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
4244 static int fec_enet_get_irq_cnt(struct platform_device *pdev)
4246 int irq_cnt = platform_irq_count(pdev);
4248 if (irq_cnt > FEC_IRQ_NUM)
4249 irq_cnt = FEC_IRQ_NUM; /* last for pps */
4250 else if (irq_cnt == 2)
4251 irq_cnt = 1; /* last for pps */
4252 else if (irq_cnt <= 0)
4253 irq_cnt = 1; /* At least 1 irq is needed */
4257 static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
4259 struct net_device *ndev = platform_get_drvdata(pdev);
4260 struct fec_enet_private *fep = netdev_priv(ndev);
4262 if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
4263 fep->wake_irq = fep->irq[2];
4265 fep->wake_irq = fep->irq[0];
4268 static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
4269 struct device_node *np)
4271 struct device_node *gpr_np;
4275 gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
4279 ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
4280 ARRAY_SIZE(out_val));
4282 dev_dbg(&fep->pdev->dev, "no stop mode property\n");
4286 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
4287 if (IS_ERR(fep->stop_gpr.gpr)) {
4288 dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
4289 ret = PTR_ERR(fep->stop_gpr.gpr);
4290 fep->stop_gpr.gpr = NULL;
4294 fep->stop_gpr.reg = out_val[1];
4295 fep->stop_gpr.bit = out_val[2];
4298 of_node_put(gpr_np);
4304 fec_probe(struct platform_device *pdev)
4306 struct fec_enet_private *fep;
4307 struct fec_platform_data *pdata;
4308 phy_interface_t interface;
4309 struct net_device *ndev;
4310 int i, irq, ret = 0;
4312 struct device_node *np = pdev->dev.of_node, *phy_node;
4317 const struct fec_devinfo *dev_info;
4319 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
4321 /* Init network device */
4322 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
4323 FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
4327 SET_NETDEV_DEV(ndev, &pdev->dev);
4329 /* setup board info structure */
4330 fep = netdev_priv(ndev);
4332 dev_info = device_get_match_data(&pdev->dev);
4334 dev_info = (const struct fec_devinfo *)pdev->id_entry->driver_data;
4336 fep->quirks = dev_info->quirks;
4339 fep->num_rx_queues = num_rx_qs;
4340 fep->num_tx_queues = num_tx_qs;
4342 #if !defined(CONFIG_M5272)
4343 /* default enable pause frame auto negotiation */
4344 if (fep->quirks & FEC_QUIRK_HAS_GBIT)
4345 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
4348 /* Select default pin state */
4349 pinctrl_pm_select_default_state(&pdev->dev);
4351 fep->hwp = devm_platform_ioremap_resource(pdev, 0);
4352 if (IS_ERR(fep->hwp)) {
4353 ret = PTR_ERR(fep->hwp);
4354 goto failed_ioremap;
4358 fep->dev_id = dev_id++;
4360 platform_set_drvdata(pdev, ndev);
4362 if ((of_machine_is_compatible("fsl,imx6q") ||
4363 of_machine_is_compatible("fsl,imx6dl")) &&
4364 !of_property_read_bool(np, "fsl,err006687-workaround-present"))
4365 fep->quirks |= FEC_QUIRK_ERR006687;
4367 ret = fec_enet_ipc_handle_init(fep);
4369 goto failed_ipc_init;
4371 if (of_property_read_bool(np, "fsl,magic-packet"))
4372 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
4374 ret = fec_enet_init_stop_mode(fep, np);
4376 goto failed_stop_mode;
4378 phy_node = of_parse_phandle(np, "phy-handle", 0);
4379 if (!phy_node && of_phy_is_fixed_link(np)) {
4380 ret = of_phy_register_fixed_link(np);
4383 "broken fixed-link specification\n");
4386 phy_node = of_node_get(np);
4388 fep->phy_node = phy_node;
4390 ret = of_get_phy_mode(pdev->dev.of_node, &interface);
4392 pdata = dev_get_platdata(&pdev->dev);
4394 fep->phy_interface = pdata->phy;
4396 fep->phy_interface = PHY_INTERFACE_MODE_MII;
4398 fep->phy_interface = interface;
4401 ret = fec_enet_parse_rgmii_delay(fep, np);
4403 goto failed_rgmii_delay;
4405 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
4406 if (IS_ERR(fep->clk_ipg)) {
4407 ret = PTR_ERR(fep->clk_ipg);
4411 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
4412 if (IS_ERR(fep->clk_ahb)) {
4413 ret = PTR_ERR(fep->clk_ahb);
4417 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
4419 /* enet_out is optional, depends on board */
4420 fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
4421 if (IS_ERR(fep->clk_enet_out)) {
4422 ret = PTR_ERR(fep->clk_enet_out);
4426 fep->ptp_clk_on = false;
4427 mutex_init(&fep->ptp_clk_mutex);
4429 /* clk_ref is optional, depends on board */
4430 fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
4431 if (IS_ERR(fep->clk_ref)) {
4432 ret = PTR_ERR(fep->clk_ref);
4435 fep->clk_ref_rate = clk_get_rate(fep->clk_ref);
4437 /* clk_2x_txclk is optional, depends on board */
4438 if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
4439 fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
4440 if (IS_ERR(fep->clk_2x_txclk))
4441 fep->clk_2x_txclk = NULL;
4444 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
4445 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
4446 if (IS_ERR(fep->clk_ptp)) {
4447 fep->clk_ptp = NULL;
4448 fep->bufdesc_ex = false;
4451 ret = fec_enet_clk_enable(ndev, true);
4455 ret = clk_prepare_enable(fep->clk_ipg);
4457 goto failed_clk_ipg;
4458 ret = clk_prepare_enable(fep->clk_ahb);
4460 goto failed_clk_ahb;

	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
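
	/* The get_noresume/set_active/enable sequence above holds the device
	 * active for the rest of probe; the reference is dropped by the
	 * pm_runtime_put_autosuspend() on the success path below.
	 */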

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < irq_cnt; i++) {
		snprintf(irq_name, sizeof(irq_name), "int%d", i);
		irq = platform_get_irq_byname_optional(pdev, irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	/* Decide which interrupt line is wakeup capable */
	fec_enet_get_wakeup_irq(pdev);

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
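	/* i.e. cap the MTU so a full frame (payload plus Ethernet header
	 * and FCS) never exceeds PKT_MAXBUF_SIZE.
	 */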

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
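
	/* The labels below unwind in the reverse order of acquisition, so a
	 * failure at any point in probe releases only what was already set
	 * up.
	 */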
failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
	fec_enet_deinit(ndev);
failed_init:
	fec_ptp_stop(pdev);
failed_reset:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_rgmii_delay:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(phy_node);
failed_stop_mode:
failed_ipc_init:
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static void
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		dev_err(&pdev->dev,
			"Failed to resume device in remove callback (%pe)\n",
			ERR_PTR(ret));

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);

	/* After pm_runtime_get_sync() failed, the clks are still off, so skip
	 * disabling them again.
	 */
	if (ret >= 0) {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_ipg);
	}
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	fec_enet_deinit(ndev);
	free_netdev(ndev);
}

static int fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
			fec_irqs_disable(ndev);
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
		} else {
			fec_irqs_disable_except_wakeup(ndev);
			if (fep->wake_irq > 0) {
				disable_irq(fep->wake_irq);
				enable_irq_wake(fep->wake_irq);
			}
			fec_enet_stop_mode(fep, true);
		}
		/* It's safe to disable clocks since interrupts are masked */
		fec_enet_clk_enable(ndev, false);
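
		/* pm_runtime_force_suspend() below additionally runs
		 * fec_runtime_suspend() to gate the ahb/ipg clocks when
		 * runtime PM was still holding the device active.
		 */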
		fep->rpm_active = !pm_runtime_status_suspended(dev);
		if (fep->rpm_active) {
			ret = pm_runtime_force_suspend(dev);
			if (ret < 0) {
				rtnl_unlock();
				return ret;
			}
		}
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* If the SoC supplies the PHY clock or controls the PHY regulator,
	 * disabling either one takes the PHY link down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}

static int fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->rpm_active)
			pm_runtime_force_resume(dev);

		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			fec_enet_stop_mode(fep, false);
			if (fep->wake_irq) {
				disable_irq_wake(fep->wake_irq);
				enable_irq(fep->wake_irq);
			}

			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_init_hw(ndev->phydev);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}

static int fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ahb);
	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

static int fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		return ret;
	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	return 0;

failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);
	return ret;
}
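
/* SYSTEM_SLEEP_PM_OPS()/RUNTIME_PM_OPS() always reference their callbacks
 * (unlike the older SET_*_PM_OPS variants), so pm_ptr() in the driver
 * struct below lets the compiler discard the whole table when CONFIG_PM
 * is disabled.
 */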

static const struct dev_pm_ops fec_pm_ops = {
	SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= pm_ptr(&fec_pm_ops),
		.of_match_table = fec_dt_ids,
		.suppress_bind_attrs = true,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove	= fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
MODULE_LICENSE("GPL");