1 // SPDX-License-Identifier: GPL-2.0
3 /* Renesas Ethernet-TSN device driver
5 * Copyright (C) 2022 Renesas Electronics Corporation
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/ethtool.h>
13 #include <linux/module.h>
14 #include <linux/net_tstamp.h>
16 #include <linux/of_mdio.h>
17 #include <linux/of_net.h>
18 #include <linux/phy.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/reset.h>
22 #include <linux/spinlock.h>
25 #include "rcar_gen4_ptp.h"
/*
 * NOTE(review): this span is the interior of the driver's private state
 * struct (presumably "struct rtsn_private") — the opening line and several
 * members were lost in extraction, and original file line numbers are fused
 * into each line. Members kept byte-identical; comments only.
 */
/* Core device handles. */
28 struct net_device *ndev;
29 struct platform_device *pdev;
/* Shared R-Car Gen4 gPTP clock instance. */
31 struct rcar_gen4_ptp_private *ptp_priv;
33 struct reset_control *reset;
/* Base address tables (one entry per descriptor chain) and their DMA handles. */
38 dma_addr_t tx_desc_bat_dma;
39 struct rtsn_desc *tx_desc_bat;
41 dma_addr_t rx_desc_bat_dma;
42 struct rtsn_desc *rx_desc_bat;
/* DMA handles and CPU pointers for the TX/RX descriptor rings. */
43 dma_addr_t tx_desc_dma;
44 dma_addr_t rx_desc_dma;
45 struct rtsn_ext_desc *tx_ring;
46 struct rtsn_ext_ts_desc *rx_ring;
/* Per-ring-entry skb bookkeeping arrays. */
47 struct sk_buff **tx_skb;
48 struct sk_buff **rx_skb;
49 spinlock_t lock; /* Register access lock */
55 struct napi_struct napi;
56 struct rtnl_link_stats64 stats;
/* PHY interface mode parsed from the device tree. */
59 phy_interface_t iface;
67 static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg)
69 return ioread32(priv->base + reg);
72 static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data)
74 iowrite32(data, priv->base + reg);
77 static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg,
80 rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set);
83 static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg,
84 u32 mask, u32 expected)
88 return readl_poll_timeout(priv->base + reg, val,
89 (val & mask) == expected,
90 RTSN_INTERVAL_US, RTSN_TIMEOUT_US);
93 static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable)
96 rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX));
97 rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX));
99 rtsn_write(priv, TDID0, TDIE_TDID_TDX(TX_CHAIN_IDX));
100 rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX));
104 static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts)
106 struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv;
108 ptp_priv->info.gettime64(&ptp_priv->info, ts);
/*
 * Reclaim completed TX descriptors: report HW timestamps, unmap DMA,
 * free skbs, update stats and mark entries empty again.
 * NOTE(review): extraction-garbled — original line numbers are fused into
 * each line and interior lines (locals, braces, return) are missing.
 * Code kept byte-identical; comments only.
 */
111 static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only)
113 struct rtsn_private *priv = netdev_priv(ndev);
114 struct rtsn_ext_desc *desc;
/* Walk from dirty_tx up to cur_tx (entries the HW may have finished). */
119 for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) {
120 entry = priv->dirty_tx % priv->num_tx_ring;
121 desc = &priv->tx_ring[entry];
/* When only freeing transmitted entries, stop at one HW still owns. */
122 if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY)
126 size = le16_to_cpu(desc->info_ds) & TX_DS;
127 skb = priv->tx_skb[entry];
/* Deliver the hardware TX timestamp if one was requested for this skb. */
129 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
130 struct skb_shared_hwtstamps shhwtstamps;
131 struct timespec64 ts;
133 rtsn_get_timestamp(priv, &ts);
134 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
135 shhwtstamps.hwtstamp = timespec64_to_ktime(ts);
136 skb_tstamp_tx(skb, &shhwtstamps);
138 dma_unmap_single(ndev->dev.parent,
139 le32_to_cpu(desc->dptr),
140 size, DMA_TO_DEVICE);
141 dev_kfree_skb_any(priv->tx_skb[entry]);
144 priv->stats.tx_packets++;
145 priv->stats.tx_bytes += size;
148 desc->die_dt = DT_EEMPTY;
/* Keep the trailing link descriptor chaining the ring back to its start. */
151 desc = &priv->tx_ring[priv->num_tx_ring];
152 desc->die_dt = DT_LINK;
/*
 * NAPI RX: harvest up to @budget completed RX descriptors, attach HW
 * timestamps when enabled, pass packets up via GRO, then refill the ring.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines missing. Code kept byte-identical; comments only.
 */
157 static int rtsn_rx(struct net_device *ndev, int budget)
159 struct rtsn_private *priv = netdev_priv(ndev);
160 unsigned int ndescriptors;
161 unsigned int rx_packets;
/* RX timestamping enabled for PTP v2 L2 events (or ALL, which includes it). */
165 get_ts = priv->ptp_priv->tstamp_rx_ctrl &
166 RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
168 ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx;
170 for (i = 0; i < ndescriptors; i++) {
171 const unsigned int entry = priv->cur_rx % priv->num_rx_ring;
172 struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
177 /* Stop processing descriptors if budget is consumed. */
178 if (rx_packets >= budget)
181 /* Stop processing descriptors on first empty. */
182 if ((desc->die_dt & DT_MASK) == DT_FEMPTY)
186 pkt_len = le16_to_cpu(desc->info_ds) & RX_DS;
188 skb = priv->rx_skb[entry];
189 priv->rx_skb[entry] = NULL;
190 dma_addr = le32_to_cpu(desc->dptr);
191 dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ,
194 /* Get timestamp if enabled. */
196 struct skb_shared_hwtstamps *shhwtstamps;
197 struct timespec64 ts;
199 shhwtstamps = skb_hwtstamps(skb);
200 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
/* Seconds/nanoseconds come from the extended timestamp descriptor. */
202 ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec);
203 ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff));
205 shhwtstamps->hwtstamp = timespec64_to_ktime(ts);
208 skb_put(skb, pkt_len);
209 skb->protocol = eth_type_trans(skb, ndev);
210 napi_gro_receive(&priv->napi, skb);
212 /* Update statistics. */
213 priv->stats.rx_packets++;
214 priv->stats.rx_bytes += pkt_len;
216 /* Update counters. */
221 /* Refill the RX ring buffers */
222 for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
223 const unsigned int entry = priv->dirty_rx % priv->num_rx_ring;
224 struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry];
228 desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
/* Allocate and map a fresh buffer for entries whose skb was consumed. */
230 if (!priv->rx_skb[entry]) {
231 skb = napi_alloc_skb(&priv->napi,
232 PKT_BUF_SZ + RTSN_ALIGN - 1);
235 skb_reserve(skb, NET_IP_ALIGN);
236 dma_addr = dma_map_single(ndev->dev.parent, skb->data,
237 le16_to_cpu(desc->info_ds),
/* On mapping failure the descriptor is given a zero length (HW skips it). */
239 if (dma_mapping_error(ndev->dev.parent, dma_addr))
240 desc->info_ds = cpu_to_le16(0);
241 desc->dptr = cpu_to_le32(dma_addr);
242 skb_checksum_none_assert(skb);
243 priv->rx_skb[entry] = skb;
247 desc->die_dt = DT_FEMPTY | D_DIE;
/* Keep the trailing link descriptor chaining the ring back to its start. */
250 priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK;
/*
 * NAPI poll: run RX up to @budget, reclaim TX under the register lock,
 * wake the queue, and re-enable data IRQs once the budget is not exhausted.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines (locals, return) missing. Code kept byte-identical.
 */
255 static int rtsn_poll(struct napi_struct *napi, int budget)
257 struct rtsn_private *priv;
258 struct net_device *ndev;
263 priv = netdev_priv(ndev);
265 /* Processing RX Descriptor Ring */
266 work_done = rtsn_rx(ndev, budget);
268 /* Processing TX Descriptor Ring */
269 spin_lock_irqsave(&priv->lock, flags);
270 rtsn_tx_free(ndev, true);
271 netif_wake_subqueue(ndev, 0);
272 spin_unlock_irqrestore(&priv->lock, flags);
274 /* Re-enable TX/RX interrupts */
275 if (work_done < budget && napi_complete_done(napi, work_done)) {
276 spin_lock_irqsave(&priv->lock, flags);
277 rtsn_ctrl_data_irq(priv, true);
278 spin_unlock_irqrestore(&priv->lock, flags);
/*
 * Allocate the TX/RX base-address tables (one rtsn_desc per chain) from
 * coherent DMA memory and initialise every entry to DT_EOS.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines (GFP flags, error returns) missing. Code kept
 * byte-identical; comments only.
 */
284 static int rtsn_desc_alloc(struct rtsn_private *priv)
286 struct device *dev = &priv->pdev->dev;
289 priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS;
290 priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size,
291 &priv->tx_desc_bat_dma,
292 if (!priv->tx_desc_bat)
294 if (!priv->tx_desc_bat)
314 static void rtsn_desc_free(struct rtsn_private *priv)
316 if (priv->tx_desc_bat)
317 dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size,
318 priv->tx_desc_bat, priv->tx_desc_bat_dma);
319 priv->tx_desc_bat = NULL;
321 if (priv->rx_desc_bat)
322 dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size,
323 priv->rx_desc_bat, priv->rx_desc_bat_dma);
324 priv->rx_desc_bat = NULL;
327 static void rtsn_chain_free(struct rtsn_private *priv)
329 struct device *dev = &priv->pdev->dev;
331 dma_free_coherent(dev,
332 sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1),
333 priv->tx_ring, priv->tx_desc_dma);
334 priv->tx_ring = NULL;
336 dma_free_coherent(dev,
337 sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1),
338 priv->rx_ring, priv->rx_desc_dma);
339 priv->rx_ring = NULL;
/*
 * Allocate skb arrays, preallocate RX skbs, and allocate the TX/RX
 * descriptor rings (one extra entry each for the link descriptor).
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines (locals, error paths, return) missing. Code kept
 * byte-identical; comments only.
 */
348 static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size)
350 struct net_device *ndev = priv->ndev;
354 priv->num_tx_ring = tx_size;
355 priv->num_rx_ring = rx_size;
357 priv->tx_skb = kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL);
358 priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL);
360 if (!priv->rx_skb || !priv->tx_skb)
/* Preallocate one receive skb per ring entry, aligned for the HW. */
363 for (i = 0; i < rx_size; i++) {
364 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1);
367 skb_reserve(skb, NET_IP_ALIGN);
368 priv->rx_skb[i] = skb;
371 /* Allocate TX, RX descriptors */
372 priv->tx_ring = dma_alloc_coherent(ndev->dev.parent,
373 sizeof(struct rtsn_ext_desc) * (tx_size + 1),
374 &priv->tx_desc_dma, GFP_KERNEL);
375 priv->rx_ring = dma_alloc_coherent(ndev->dev.parent,
376 sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1),
377 &priv->rx_desc_dma, GFP_KERNEL);
379 if (!priv->tx_ring || !priv->rx_ring)
/* Error path: release everything allocated so far. */
384 rtsn_chain_free(priv);
/*
 * Format the TX/RX rings: mark TX entries empty, map and arm RX buffers,
 * terminate each ring with a link descriptor pointing back to its start,
 * and hook both rings into their base-address-table entries.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines missing. Code kept byte-identical; comments only.
 */
389 static void rtsn_chain_format(struct rtsn_private *priv)
391 struct net_device *ndev = priv->ndev;
392 struct rtsn_ext_ts_desc *rx_desc;
393 struct rtsn_ext_desc *tx_desc;
394 struct rtsn_desc *bat_desc;
/* TX ring: all entries start empty (driver-owned). */
404 memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring);
405 for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++)
406 tx_desc->die_dt = DT_EEMPTY | D_DIE;
/* Trailing link descriptor loops the chain back to the ring base. */
408 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
409 tx_desc->die_dt = DT_LINK;
411 bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX];
412 bat_desc->die_dt = DT_LINK;
413 bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma);
/* RX ring: map each preallocated skb and hand the entry to the HW. */
416 memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring);
417 for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) {
418 dma_addr = dma_map_single(ndev->dev.parent,
419 priv->rx_skb[i]->data, PKT_BUF_SZ,
/* A failed mapping leaves info_ds at 0 so the HW skips the entry. */
421 if (!dma_mapping_error(ndev->dev.parent, dma_addr))
422 rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ);
423 rx_desc->dptr = cpu_to_le32((u32)dma_addr);
424 rx_desc->die_dt = DT_FEMPTY | D_DIE;
426 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
427 rx_desc->die_dt = DT_LINK;
429 bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX];
430 bat_desc->die_dt = DT_LINK;
431 bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma);
434 static int rtsn_dmac_init(struct rtsn_private *priv)
438 ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE);
442 rtsn_chain_format(priv);
447 static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv)
449 return (rtsn_read(priv, OSR) & OSR_OPS) >> 1;
452 static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode)
456 /* Need to busy loop as mode changes can happen in atomic context. */
457 for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) {
458 if (rtsn_read_mode(priv) == mode)
461 udelay(RTSN_INTERVAL_US);
467 static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode)
471 rtsn_write(priv, OCR, mode);
472 ret = rtsn_wait_mode(priv, mode);
474 netdev_err(priv->ndev, "Failed to switch operation mode\n");
478 static int rtsn_get_data_irq_status(struct rtsn_private *priv)
482 val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX);
483 val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX);
488 static irqreturn_t rtsn_irq(int irq, void *dev_id)
490 struct rtsn_private *priv = dev_id;
493 spin_lock(&priv->lock);
495 if (rtsn_get_data_irq_status(priv)) {
496 /* Clear TX/RX irq status */
497 rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX));
498 rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX));
500 if (napi_schedule_prep(&priv->napi)) {
501 /* Disable TX/RX interrupts */
502 rtsn_ctrl_data_irq(priv, false);
504 __napi_schedule(&priv->napi);
510 spin_unlock(&priv->lock);
515 static int rtsn_request_irq(unsigned int irq, irq_handler_t handler,
516 unsigned long flags, struct rtsn_private *priv,
522 name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s",
523 priv->ndev->name, ch);
527 ret = request_irq(irq, handler, flags, name, priv);
529 netdev_err(priv->ndev, "Cannot request IRQ %s\n", name);
534 static void rtsn_free_irqs(struct rtsn_private *priv)
536 free_irq(priv->tx_data_irq, priv);
537 free_irq(priv->rx_data_irq, priv);
540 static int rtsn_request_irqs(struct rtsn_private *priv)
544 priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx");
545 if (priv->rx_data_irq < 0)
546 return priv->rx_data_irq;
548 priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx");
549 if (priv->tx_data_irq < 0)
550 return priv->tx_data_irq;
552 ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx");
556 ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx");
558 free_irq(priv->tx_data_irq, priv);
565 static int rtsn_reset(struct rtsn_private *priv)
567 reset_control_reset(priv->reset);
570 return rtsn_wait_mode(priv, OCR_OPC_DISABLE);
573 static int rtsn_axibmi_init(struct rtsn_private *priv)
577 ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE);
582 rtsn_write(priv, AXIWC, AXIWC_DEFAULT);
585 rtsn_write(priv, AXIRC, AXIRC_DEFAULT);
587 /* TX Descriptor chain setting */
588 rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX));
589 rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET);
590 rtsn_write(priv, TATLR, TATLR_TATL);
592 ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0);
596 /* RX Descriptor chain setting */
597 rtsn_write(priv, RATLS0,
598 RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX));
599 rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET);
600 rtsn_write(priv, RATLR, RATLR_RATL);
602 ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0);
606 /* Enable TX/RX interrupts */
607 rtsn_ctrl_data_irq(priv, true);
612 static void rtsn_mhd_init(struct rtsn_private *priv)
614 /* TX General setting */
615 rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM);
616 rtsn_write(priv, TMS0, TMS_MFS_MAX);
619 rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX));
620 rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX));
/*
 * Parse the PHY interface mode from the device tree and validate it is one
 * the hardware supports (MII or the RGMII variants).
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line; the case bodies (presumably setting the initial link speed) and
 * return paths were lost. Code kept byte-identical; comments only.
 */
623 static int rtsn_get_phy_params(struct rtsn_private *priv)
627 ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface);
631 switch (priv->iface) {
632 case PHY_INTERFACE_MODE_MII:
635 case PHY_INTERFACE_MODE_RGMII:
636 case PHY_INTERFACE_MODE_RGMII_ID:
637 case PHY_INTERFACE_MODE_RGMII_RXID:
638 case PHY_INTERFACE_MODE_RGMII_TXID:
/*
 * Program the MAC's PHY-interface-select bits (MPIC_PIS) to match the
 * configured interface mode.
 * NOTE(review): extraction-garbled — the per-case value assignments were
 * lost. Code kept byte-identical; comments only.
 */
648 static void rtsn_set_phy_interface(struct rtsn_private *priv)
652 switch (priv->iface) {
653 case PHY_INTERFACE_MODE_MII:
656 case PHY_INTERFACE_MODE_RGMII:
657 case PHY_INTERFACE_MODE_RGMII_ID:
658 case PHY_INTERFACE_MODE_RGMII_RXID:
659 case PHY_INTERFACE_MODE_RGMII_TXID:
/* Apply the selected interface mode to the MPIC register. */
666 rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val);
/*
 * Program the MAC's line-speed-configuration bits (MPIC_LSC) from the
 * current priv->speed.
 * NOTE(review): extraction-garbled — the per-speed case bodies were lost.
 * Code kept byte-identical; comments only.
 */
669 static void rtsn_set_rate(struct rtsn_private *priv)
673 switch (priv->speed) {
687 rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val);
/*
 * Initialise the RMAC: program the station MAC address, select the PHY
 * interface, set the MDIO clock parameters, and run link verification.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines missing. Code kept byte-identical; comments only.
 */
690 static int rtsn_rmac_init(struct rtsn_private *priv)
692 const u8 *mac_addr = priv->ndev->dev_addr;
695 /* Set MAC address */
696 rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]);
697 rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) |
698 (mac_addr[4] << 8) | mac_addr[5]);
/* Select MII/RGMII per the device-tree phy-mode. */
701 rtsn_set_phy_interface(priv);
/* MDIO clock/hold-time defaults for the station management interface. */
705 rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
706 MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
708 /* Link verification */
709 rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV);
710 ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0);
/*
 * Full hardware bring-up: reset, enter CONFIG mode, initialise the AXI
 * interface and RMAC, then transition DISABLE -> OPERATION.
 * NOTE(review): extraction-garbled — error-check lines between the calls
 * were lost. Code kept byte-identical; comments only.
 */
717 static int rtsn_hw_init(struct rtsn_private *priv)
721 ret = rtsn_reset(priv);
725 /* Change to CONFIG mode */
726 ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
730 ret = rtsn_axibmi_init(priv);
736 ret = rtsn_rmac_init(priv);
/* Any CONFIG -> OPERATION transition must pass through DISABLE. */
740 ret = rtsn_change_mode(priv, OCR_OPC_DISABLE);
744 /* Change to OPERATION mode */
745 ret = rtsn_change_mode(priv, OCR_OPC_OPERATION);
/*
 * Perform one MDIO transaction via the MPSM register: start the access,
 * poll for completion, and for reads return the received data.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines (write/read branching, error return) missing.
 * Code kept byte-identical; comments only.
 */
750 static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad,
753 struct rtsn_private *priv = bus->priv;
/* PHY address + register address + start bit. */
757 val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME;
/* Write direction carries the data to transmit. */
760 val |= MPSM_PSMAD | MPSM_PRD_SET(data);
762 rtsn_write(priv, MPSM, val);
/* Wait for the start bit to self-clear (transaction done). */
764 ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0);
769 ret = MPSM_PRD_GET(rtsn_read(priv, MPSM));
774 static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum)
776 return rtsn_mii_access(bus, true, addr, regnum, 0);
779 static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val)
781 return rtsn_mii_access(bus, false, addr, regnum, val);
/*
 * Allocate and register the driver's MDIO bus. The device must be put
 * into CONFIG mode first so the station management interface is usable.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines (error handling, bus-field setup, labels) missing.
 * Code kept byte-identical; comments only.
 */
784 static int rtsn_mdio_alloc(struct rtsn_private *priv)
786 struct platform_device *pdev = priv->pdev;
787 struct device *dev = &pdev->dev;
788 struct device_node *mdio_node;
792 mii = mdiobus_alloc();
796 mdio_node = of_get_child_by_name(dev->of_node, "mdio");
802 /* Enter config mode before registering the MDIO bus */
803 ret = rtsn_reset(priv);
807 ret = rtsn_change_mode(priv, OCR_OPC_CONFIG);
/* MDIO clock/hold-time defaults, same as in rtsn_rmac_init(). */
811 rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
812 MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT);
814 /* Register the MDIO bus */
815 mii->name = "rtsn_mii";
816 snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x",
817 pdev->name, pdev->id);
819 mii->read = rtsn_mii_read;
820 mii->write = rtsn_mii_write;
823 ret = of_mdiobus_register(mii, mdio_node);
824 of_node_put(mdio_node);
837 static void rtsn_mdio_free(struct rtsn_private *priv)
839 mdiobus_unregister(priv->mii);
840 mdiobus_free(priv->mii);
/*
 * phylib adjust_link callback: track speed/link changes and, when the MAC
 * must be reconfigured, walk the required mode sequence (any transition
 * to/from CONFIG or OPERATION passes through DISABLE).
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines (branch bodies, labels) missing. Code kept
 * byte-identical; comments only.
 */
844 static void rtsn_adjust_link(struct net_device *ndev)
846 struct rtsn_private *priv = netdev_priv(ndev);
847 struct phy_device *phydev = ndev->phydev;
848 bool new_state = false;
851 spin_lock_irqsave(&priv->lock, flags);
854 if (phydev->speed != priv->speed) {
856 priv->speed = phydev->speed;
861 priv->link = phydev->link;
863 } else if (priv->link) {
870 /* Need to transition to CONFIG mode before reconfiguring and
871 * then back to the original mode. Any state change to/from
872 * CONFIG or OPERATION must go over DISABLED to stop Rx/Tx.
874 enum rtsn_mode orgmode = rtsn_read_mode(priv);
876 /* Transit to CONFIG */
877 if (orgmode != OCR_OPC_CONFIG) {
878 if (orgmode != OCR_OPC_DISABLE &&
879 rtsn_change_mode(priv, OCR_OPC_DISABLE))
881 if (rtsn_change_mode(priv, OCR_OPC_CONFIG))
887 /* Transition to original mode */
888 if (orgmode != OCR_OPC_CONFIG) {
889 if (rtsn_change_mode(priv, OCR_OPC_DISABLE))
891 if (orgmode != OCR_OPC_DISABLE &&
892 rtsn_change_mode(priv, orgmode))
897 spin_unlock_irqrestore(&priv->lock, flags);
/* Presumably gated on new_state in the lost lines — confirm. */
900 phy_print_status(phydev);
/*
 * Connect to the PHY referenced by the "phy-handle" DT property and
 * restrict it to full-duplex modes.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines (error returns, of_node_put) missing. Code kept
 * byte-identical; comments only.
 */
903 static int rtsn_phy_init(struct rtsn_private *priv)
905 struct device_node *np = priv->ndev->dev.parent->of_node;
906 struct phy_device *phydev;
907 struct device_node *phy;
911 phy = of_parse_phandle(np, "phy-handle", 0);
915 phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0,
921 /* Only support full-duplex mode */
922 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
923 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
924 phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
926 phy_attached_info(phydev);
931 static void rtsn_phy_deinit(struct rtsn_private *priv)
933 phy_disconnect(priv->ndev->phydev);
934 priv->ndev->phydev = NULL;
/*
 * Full driver init used by ndo_open: descriptors, DMAC, hardware, PHY and
 * IRQs, unwinding in reverse order on failure.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line; the error labels and returns were lost. Code kept byte-identical;
 * comments only.
 */
937 static int rtsn_init(struct rtsn_private *priv)
941 ret = rtsn_desc_alloc(priv);
945 ret = rtsn_dmac_init(priv);
947 goto error_free_desc;
949 ret = rtsn_hw_init(priv);
951 goto error_free_chain;
953 ret = rtsn_phy_init(priv);
955 goto error_free_chain;
957 ret = rtsn_request_irqs(priv);
/* Error unwind path (labels lost in extraction). */
963 rtsn_phy_deinit(priv);
965 rtsn_chain_free(priv);
967 rtsn_desc_free(priv);
/* Tear down everything set up by rtsn_init(), in reverse order. */
static void rtsn_deinit(struct rtsn_private *priv)
{
	rtsn_free_irqs(priv);
	rtsn_phy_deinit(priv);
	rtsn_chain_free(priv);
	rtsn_desc_free(priv);
}
/*
 * Determine the MAC address: device tree first, then the MRMAC0/1
 * registers (set by e.g. the bootloader), falling back to a random one.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines (locals, returns, braces) missing. Code kept
 * byte-identical; comments only.
 */
979 static void rtsn_parse_mac_address(struct device_node *np,
980 struct net_device *ndev)
982 struct rtsn_private *priv = netdev_priv(ndev);
987 /* Try to read address from Device Tree. */
988 if (!of_get_mac_address(np, addr)) {
989 eth_hw_addr_set(ndev, addr);
993 /* Try to read address from device. */
994 mrmac0 = rtsn_read(priv, MRMAC0);
995 mrmac1 = rtsn_read(priv, MRMAC1);
/* Unpack the big-endian byte layout written by rtsn_rmac_init(). */
997 addr[0] = (mrmac0 >> 8) & 0xff;
998 addr[1] = (mrmac0 >> 0) & 0xff;
999 addr[2] = (mrmac1 >> 24) & 0xff;
1000 addr[3] = (mrmac1 >> 16) & 0xff;
1001 addr[4] = (mrmac1 >> 8) & 0xff;
1002 addr[5] = (mrmac1 >> 0) & 0xff;
1004 if (is_valid_ether_addr(addr)) {
1005 eth_hw_addr_set(ndev, addr);
1009 /* Fallback to a random address */
1010 eth_hw_addr_random(ndev);
1013 static int rtsn_open(struct net_device *ndev)
1015 struct rtsn_private *priv = netdev_priv(ndev);
1018 napi_enable(&priv->napi);
1020 ret = rtsn_init(priv);
1022 napi_disable(&priv->napi);
1026 phy_start(ndev->phydev);
1028 netif_start_queue(ndev);
1033 static int rtsn_stop(struct net_device *ndev)
1035 struct rtsn_private *priv = netdev_priv(ndev);
1037 phy_stop(priv->ndev->phydev);
1038 napi_disable(&priv->napi);
1039 rtsn_change_mode(priv, OCR_OPC_DISABLE);
/*
 * ndo_start_xmit: queue one skb on the single-descriptor TX chain and kick
 * the DMA engine. Oversized packets are dropped; a full ring returns
 * NETDEV_TX_BUSY after stopping the queue.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line, interior lines (goto/labels, counters, unlock paths) missing.
 * Code kept byte-identical; comments only.
 */
1045 static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1047 struct rtsn_private *priv = netdev_priv(ndev);
1048 struct rtsn_ext_desc *desc;
1049 int ret = NETDEV_TX_OK;
1050 unsigned long flags;
1051 dma_addr_t dma_addr;
1054 spin_lock_irqsave(&priv->lock, flags);
1056 /* Drop packet if it won't fit in a single descriptor. */
1057 if (skb->len >= TX_DS) {
1058 priv->stats.tx_dropped++;
1059 priv->stats.tx_errors++;
1060 dev_kfree_skb_any(skb);
/* Ring full: stop the queue and ask the stack to retry later. */
1064 if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) {
1065 netif_stop_subqueue(ndev, 0);
1066 ret = NETDEV_TX_BUSY;
/* Pad runt frames to the Ethernet minimum (frees skb on failure). */
1070 if (skb_put_padto(skb, ETH_ZLEN))
1073 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len,
1075 if (dma_mapping_error(ndev->dev.parent, dma_addr)) {
1076 dev_kfree_skb_any(skb);
1080 entry = priv->cur_tx % priv->num_tx_ring;
1081 priv->tx_skb[entry] = skb;
1082 desc = &priv->tx_ring[entry];
1083 desc->dptr = cpu_to_le32(dma_addr);
1084 desc->info_ds = cpu_to_le16(skb->len);
1085 desc->info1 = cpu_to_le64(skb->len);
/* Request a hardware TX timestamp when the skb asks for one. */
1087 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
1088 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1090 desc->info_ds |= cpu_to_le16(TXC);
1091 desc->info = priv->ts_tag;
1094 skb_tx_timestamp(skb);
/* Hand the descriptor to the hardware and start the chain. */
1097 desc->die_dt = DT_FSINGLE | D_DIE;
1101 rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX));
1103 spin_unlock_irqrestore(&priv->lock, flags);
1107 static void rtsn_get_stats64(struct net_device *ndev,
1108 struct rtnl_link_stats64 *storage)
1110 struct rtsn_private *priv = netdev_priv(ndev);
1111 *storage = priv->stats;
/*
 * ndo_eth_ioctl: forward PHY ioctls while the interface is running.
 * NOTE(review): extraction-garbled — the error return for the !running
 * case and the braces were lost. Code kept byte-identical; comments only.
 */
1114 static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
1116 if (!netif_running(ndev))
1119 return phy_do_ioctl_running(ndev, ifr, cmd);
/*
 * ndo_hwtstamp_get: report the current TX/RX hardware timestamping
 * configuration from the gPTP state.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line; error returns, braces and breaks lost. Code kept byte-identical.
 */
1122 static int rtsn_hwtstamp_get(struct net_device *ndev,
1123 struct kernel_hwtstamp_config *config)
1125 struct rcar_gen4_ptp_private *ptp_priv;
1126 struct rtsn_private *priv;
1128 if (!netif_running(ndev))
1131 priv = netdev_priv(ndev);
1132 ptp_priv = priv->ptp_priv;
/* TX: any non-zero control means timestamping is on. */
1137 ptp_priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
1139 switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) {
1140 case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT:
1141 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
1143 case RCAR_GEN4_RXTSTAMP_TYPE_ALL:
1144 config->rx_filter = HWTSTAMP_FILTER_ALL;
1147 config->rx_filter = HWTSTAMP_FILTER_NONE;
/*
 * ndo_hwtstamp_set: validate and apply a new TX/RX hardware timestamping
 * configuration. Unsupported RX filters are widened to ALL.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line; error returns, breaks and braces lost. Code kept byte-identical.
 */
1154 static int rtsn_hwtstamp_set(struct net_device *ndev,
1155 struct kernel_hwtstamp_config *config,
1156 struct netlink_ext_ack *extack)
1158 struct rcar_gen4_ptp_private *ptp_priv;
1159 struct rtsn_private *priv;
1163 if (!netif_running(ndev))
1166 priv = netdev_priv(ndev);
1167 ptp_priv = priv->ptp_priv;
1172 switch (config->tx_type) {
1173 case HWTSTAMP_TX_OFF:
1176 case HWTSTAMP_TX_ON:
1177 tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED;
1183 switch (config->rx_filter) {
1184 case HWTSTAMP_FILTER_NONE:
1187 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1188 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
1189 RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT;
/* Default: accept but widen to timestamping everything. */
1192 config->rx_filter = HWTSTAMP_FILTER_ALL;
1193 tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED |
1194 RCAR_GEN4_RXTSTAMP_TYPE_ALL;
/* Commit the validated configuration. */
1198 ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl;
1199 ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl;
/* Net-device operations table wired into ndev->netdev_ops in probe. */
1204 static const struct net_device_ops rtsn_netdev_ops = {
1205 .ndo_open = rtsn_open,
1206 .ndo_stop = rtsn_stop,
1207 .ndo_start_xmit = rtsn_start_xmit,
1208 .ndo_get_stats64 = rtsn_get_stats64,
1209 .ndo_eth_ioctl = rtsn_do_ioctl,
1210 .ndo_validate_addr = eth_validate_addr,
1211 .ndo_set_mac_address = eth_mac_addr,
1212 .ndo_hwtstamp_set = rtsn_hwtstamp_set,
1213 .ndo_hwtstamp_get = rtsn_hwtstamp_get,
/*
 * ethtool get_ts_info: advertise the gPTP clock index and the supported
 * timestamping capabilities.
 * NOTE(review): extraction-garbled — braces and return were lost. Code
 * kept byte-identical; comments only.
 */
1216 static int rtsn_get_ts_info(struct net_device *ndev,
1217 struct kernel_ethtool_ts_info *info)
1219 struct rtsn_private *priv = netdev_priv(ndev);
1221 info->phc_index = ptp_clock_index(priv->ptp_priv->clock);
1222 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
1223 SOF_TIMESTAMPING_TX_HARDWARE |
1224 SOF_TIMESTAMPING_RX_HARDWARE |
1225 SOF_TIMESTAMPING_RAW_HARDWARE;
1226 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
1227 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
/* ethtool operations; link settings are delegated to phylib helpers. */
1232 static const struct ethtool_ops rtsn_ethtool_ops = {
1233 .nway_reset = phy_ethtool_nway_reset,
1234 .get_link = ethtool_op_get_link,
1235 .get_ts_info = rtsn_get_ts_info,
1236 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1237 .set_link_ksettings = phy_ethtool_set_link_ksettings,
/* Device-tree match table (R-Car V4H Ethernet-TSN). */
1240 static const struct of_device_id rtsn_match_table[] = {
1241 { .compatible = "renesas,r8a779g0-ethertsn", },
1245 MODULE_DEVICE_TABLE(of, rtsn_match_table);
/*
 * Platform probe: allocate the netdev and gPTP state, map the "tsnes" and
 * "gptp" register regions, set up clocks/reset/IRQ/NAPI, register the PTP
 * clock, MDIO bus and finally the net device.
 * NOTE(review): extraction-garbled — original line numbers fused into each
 * line; error labels, gotos and several checks were lost. Code kept
 * byte-identical; comments only.
 */
1247 static int rtsn_probe(struct platform_device *pdev)
1249 struct rtsn_private *priv;
1250 struct net_device *ndev;
1251 struct resource *res;
1254 ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS,
1259 priv = netdev_priv(ndev);
1262 priv->ptp_priv = rcar_gen4_ptp_alloc(pdev);
1264 spin_lock_init(&priv->lock);
1265 platform_set_drvdata(pdev, priv);
1267 priv->clk = devm_clk_get(&pdev->dev, NULL);
1268 if (IS_ERR(priv->clk)) {
1269 ret = PTR_ERR(priv->clk);
1273 priv->reset = devm_reset_control_get(&pdev->dev, NULL);
1274 if (IS_ERR(priv->reset)) {
1275 ret = PTR_ERR(priv->reset);
/* Main Ethernet-TSN register block. */
1279 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes");
1281 dev_err(&pdev->dev, "Can't find tsnes resource\n");
1286 priv->base = devm_ioremap_resource(&pdev->dev, res);
1287 if (IS_ERR(priv->base)) {
1288 ret = PTR_ERR(priv->base);
1292 SET_NETDEV_DEV(ndev, &pdev->dev);
1294 ndev->features = NETIF_F_RXCSUM;
1295 ndev->hw_features = NETIF_F_RXCSUM;
1296 ndev->base_addr = res->start;
1297 ndev->netdev_ops = &rtsn_netdev_ops;
1298 ndev->ethtool_ops = &rtsn_ethtool_ops;
/* gPTP register block shared with the rcar_gen4_ptp helper. */
1300 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp");
1302 dev_err(&pdev->dev, "Can't find gptp resource\n");
1307 priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res);
1308 if (IS_ERR(priv->ptp_priv->addr)) {
1309 ret = PTR_ERR(priv->ptp_priv->addr);
1313 ret = rtsn_get_phy_params(priv);
1317 pm_runtime_enable(&pdev->dev);
1318 pm_runtime_get_sync(&pdev->dev);
1320 netif_napi_add(ndev, &priv->napi, rtsn_poll);
1322 rtsn_parse_mac_address(pdev->dev.of_node, ndev);
/* 32-bit DMA: descriptor pointers are 32-bit (see cpu_to_le32 casts). */
1324 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1326 device_set_wakeup_capable(&pdev->dev, 1);
1328 ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT,
1329 clk_get_rate(priv->clk));
1333 ret = rtsn_mdio_alloc(priv);
1337 ret = register_netdev(ndev);
1341 netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr);
/* Error unwind (labels lost in extraction). */
1346 rtsn_mdio_free(priv);
1348 rcar_gen4_ptp_unregister(priv->ptp_priv);
1350 netif_napi_del(&priv->napi);
1351 rtsn_change_mode(priv, OCR_OPC_DISABLE);
1352 pm_runtime_put_sync(&pdev->dev);
1353 pm_runtime_disable(&pdev->dev);
/*
 * Platform remove: unwind everything done in probe, in reverse order.
 * NOTE(review): extraction-garbled — braces and blank lines lost. Code
 * kept byte-identical; comments only.
 */
1360 static void rtsn_remove(struct platform_device *pdev)
1362 struct rtsn_private *priv = platform_get_drvdata(pdev);
1364 unregister_netdev(priv->ndev);
1365 rtsn_mdio_free(priv);
1366 rcar_gen4_ptp_unregister(priv->ptp_priv);
1367 rtsn_change_mode(priv, OCR_OPC_DISABLE);
1368 netif_napi_del(&priv->napi);
1370 pm_runtime_put_sync(&pdev->dev);
1371 pm_runtime_disable(&pdev->dev);
1373 free_netdev(priv->ndev);
/* Platform driver glue; the .driver sub-struct was partially lost in
 * extraction. Kept byte-identical; comments only.
 */
1376 static struct platform_driver rtsn_driver = {
1377 .probe = rtsn_probe,
1378 .remove = rtsn_remove,
1381 .of_match_table = rtsn_match_table,
1384 module_platform_driver(rtsn_driver);
1386 MODULE_AUTHOR("Phong Hoang, Niklas Söderlund");
1387 MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver");
1388 MODULE_LICENSE("GPL");