// SPDX-License-Identifier: GPL-2.0+
/*
 * This file is the driver for Renesas Ethernet AVB.
 *
 * Copyright (C) 2015-2017 Renesas Electronics Corporation
 *
 * Based on the SuperH Ethernet driver.
 */

#include <clk.h>
#include <dm.h>
#include <errno.h>
#include <malloc.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/libfdt.h>
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;

#define RAVB_REG_CCC		0x000
#define RAVB_REG_DBAT		0x004
#define RAVB_REG_CSR		0x00C
#define RAVB_REG_APSR		0x08C
#define RAVB_REG_RCR		0x090
#define RAVB_REG_TGC		0x300
#define RAVB_REG_TCCR		0x304
#define RAVB_REG_RIC0		0x360
#define RAVB_REG_RIC1		0x368
#define RAVB_REG_RIC2		0x370
#define RAVB_REG_TIC		0x378
#define RAVB_REG_ECMR		0x500
#define RAVB_REG_RFLR		0x508
#define RAVB_REG_ECSIPR		0x518
#define RAVB_REG_PIR		0x520
#define RAVB_REG_GECMR		0x5b0
#define RAVB_REG_MAHR		0x5c0
#define RAVB_REG_MALR		0x5c8

#define CCC_OPC_CONFIG		BIT(0)
#define CCC_OPC_OPERATION	BIT(1)
#define CCC_BOC			BIT(20)

#define CSR_OPS			0x0000000F
#define CSR_OPS_CONFIG		BIT(1)

#define APSR_RDM		BIT(13)
#define APSR_TDM		BIT(14)

#define TCCR_TSRQ0		BIT(0)

#define RFLR_RFL_MIN		0x05EE

#define PIR_MDI			BIT(3)
#define PIR_MDO			BIT(2)
#define PIR_MMD			BIT(1)
#define PIR_MDC			BIT(0)

#define ECMR_TRCCM		BIT(26)
#define ECMR_RZPF		BIT(20)
#define ECMR_PFR		BIT(18)
#define ECMR_RXF		BIT(17)
#define ECMR_RE			BIT(6)
#define ECMR_TE			BIT(5)
#define ECMR_DM			BIT(1)
#define ECMR_CHG_DM		(ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)

#define RAVB_NUM_BASE_DESC	16
#define RAVB_NUM_TX_DESC	8
#define RAVB_NUM_RX_DESC	8

#define RAVB_TX_QUEUE_OFFSET	0
#define RAVB_RX_QUEUE_OFFSET	4

#define RAVB_DESC_DT(n)		((n) << 28)
#define RAVB_DESC_DT_FSINGLE	RAVB_DESC_DT(0x7)
#define RAVB_DESC_DT_LINKFIX	RAVB_DESC_DT(0x9)
#define RAVB_DESC_DT_EOS	RAVB_DESC_DT(0xa)
#define RAVB_DESC_DT_FEMPTY	RAVB_DESC_DT(0xc)
#define RAVB_DESC_DT_EEMPTY	RAVB_DESC_DT(0x3)
#define RAVB_DESC_DT_MASK	RAVB_DESC_DT(0xf)

#define RAVB_DESC_DS(n)		(((n) & 0xfff) << 0)
#define RAVB_DESC_DS_MASK	0xfff

#define RAVB_RX_DESC_MSC_MC	BIT(23)
#define RAVB_RX_DESC_MSC_CEEF	BIT(22)
#define RAVB_RX_DESC_MSC_CRL	BIT(21)
#define RAVB_RX_DESC_MSC_FRE	BIT(20)
#define RAVB_RX_DESC_MSC_RTLF	BIT(19)
#define RAVB_RX_DESC_MSC_RTSF	BIT(18)
#define RAVB_RX_DESC_MSC_RFE	BIT(17)
#define RAVB_RX_DESC_MSC_CRC	BIT(16)
#define RAVB_RX_DESC_MSC_MASK	(0xff << 16)

#define RAVB_RX_DESC_MSC_RX_ERR_MASK \
	(RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
	 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)

#define RAVB_TX_TIMEOUT_MS	1000
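
/*
 * Hardware descriptor used throughout this driver: a control/status word
 * (descriptor type, data size and MSC status bits, see the RAVB_DESC_* and
 * RAVB_RX_DESC_MSC_* macros above) followed by a 32-bit buffer pointer.
 */
struct ravb_desc {
	u32	ctrl;
	u32	dptr;
};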

struct ravb_rxdesc {
	struct ravb_desc	data;
	struct ravb_desc	link;
	u8			__pad[48];	/* keep packet[] 64-byte aligned */
	u8			packet[PKTSIZE_ALIGN];
};

struct ravb_priv {
	struct ravb_desc	base_desc[RAVB_NUM_BASE_DESC];
	struct ravb_desc	tx_desc[RAVB_NUM_TX_DESC];
	struct ravb_rxdesc	rx_desc[RAVB_NUM_RX_DESC];
	u32			rx_desc_idx;
	u32			tx_desc_idx;
	struct phy_device	*phydev;
	struct mii_dev		*bus;
	void __iomem		*iobase;
	struct clk_bulk		clks;
};

static inline void ravb_flush_dcache(u32 addr, u32 len)
{
	flush_dcache_range(addr, addr + len);
}

static inline void ravb_invalidate_dcache(u32 addr, u32 len)
{
	u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
	u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);

	invalidate_dcache_range(start, end);
}
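
/*
 * Note: the invalidate helper widens the range to ARCH_DMA_MINALIGN
 * boundaries because the CPU can only invalidate whole cache lines.
 * Descriptors and packet buffers are expected to be cache-line aligned
 * (see DM_FLAG_ALLOC_PRIV_DMA in the driver declaration below), so no
 * unrelated data is discarded by the rounding.
 */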

static int ravb_send(struct udevice *dev, void *packet, int len)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_desc *desc = &eth->tx_desc[eth->tx_desc_idx];
	ulong start;

	/* Update TX descriptor */
	ravb_flush_dcache((uintptr_t)packet, len);
	memset(desc, 0x0, sizeof(*desc));
	desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
	desc->dptr = (uintptr_t)packet;
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Restart the transmitter if disabled */
	if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
		setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);

	/* Wait until packet is transmitted */
	start = get_timer(0);
	while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
		ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
		if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
			break;
		udelay(10);
	}

	if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
		return -ETIMEDOUT;

	/* The last TX descriptor is reserved as the link back to the start. */
	eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1);

	return 0;
}

static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];
	int len;
	u8 *packet;

	/* Check if the rx descriptor is ready */
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
	if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
		return -EAGAIN;

	/* Check for errors */
	if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
		desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
		ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));
		return -EAGAIN;
	}

	len = desc->data.ctrl & RAVB_DESC_DS_MASK;
	packet = (u8 *)(uintptr_t)desc->data.dptr;
	ravb_invalidate_dcache((uintptr_t)packet, len);
	*packetp = packet;

	return len;
}
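
/*
 * ravb_recv() hands out the packet buffer straight from the descriptor ring;
 * ravb_free_pkt() below is what re-arms that slot (marks it FEMPTY again)
 * and advances rx_desc_idx once the caller is done with the data.
 */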

static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct ravb_rxdesc *desc = &eth->rx_desc[eth->rx_desc_idx];

	/* Make current descriptor available again */
	desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
	ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));

	/* Point to the next descriptor */
	eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
	desc = &eth->rx_desc[eth->rx_desc_idx];
	ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));

	return 0;
}

static int ravb_reset(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	/* Set config mode */
	writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);

	/* Check the operating mode is changed to the config mode. */
	return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
				 CSR_OPS_CONFIG, true, 100, true);
}

static void ravb_base_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->base_desc, 0x0, desc_size);

	for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
		eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;

	ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);

	/* Register the descriptor base address table */
	writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
}

static void ravb_tx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
	int i;

	/* Initialize all descriptors */
	memset(eth->tx_desc, 0x0, desc_size);
	eth->tx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_TX_DESC; i++)
		eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;

	/* Mark the end of the descriptors */
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);

	/* Point the controller to the TX descriptor list. */
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_TX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}

static void ravb_rx_desc_init(struct ravb_priv *eth)
{
	const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
	int i;

	/* Initialize all descriptors */
	memset(eth->rx_desc, 0x0, desc_size);
	eth->rx_desc_idx = 0;

	for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
		eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
					    RAVB_DESC_DS(PKTSIZE_ALIGN);
		eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;

		eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
		eth->rx_desc[i].link.dptr = (uintptr_t)&eth->rx_desc[i + 1];
	}

	/* Mark the end of the descriptors */
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
	eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);

	/* Point the controller to the rx descriptor list */
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
	eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
	ravb_flush_dcache((uintptr_t)&eth->base_desc[RAVB_RX_QUEUE_OFFSET],
			  sizeof(struct ravb_desc));
}
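
/*
 * Each RX ring slot is a pair of descriptors: an FEMPTY "data" descriptor
 * owning one packet buffer, immediately followed by a LINKFIX "link"
 * descriptor pointing at the next slot. The DMAC hops from slot to slot
 * through the link descriptors, and the final link closes the ring back to
 * rx_desc[0].
 */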

static int ravb_phy_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct phy_device *phydev;
	int reg;

	phydev = phy_connect(eth->bus, -1, dev, pdata->phy_interface);
	if (!phydev)
		return -ENODEV;

	eth->phydev = phydev;

	phydev->supported &= SUPPORTED_100baseT_Full |
			     SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			     SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause |
			     SUPPORTED_Asym_Pause;

	if (pdata->max_speed != 1000) {
		phydev->supported &= ~SUPPORTED_1000baseT_Full;
		/* Stop advertising 1000BASE-T full/half duplex */
		reg = phy_read(phydev, -1, MII_CTRL1000);
		reg &= ~(BIT(9) | BIT(8));
		phy_write(phydev, -1, MII_CTRL1000, reg);
	}

	return phy_config(phydev);
}

/* Set MAC address */
static int ravb_write_hwaddr(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	unsigned char *mac = pdata->enetaddr;

	writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
	       eth->iobase + RAVB_REG_MAHR);

	writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);

	return 0;
}

/* E-MAC init function */
static int ravb_mac_init(struct ravb_priv *eth)
{
	/* Disable MAC Interrupt */
	writel(0, eth->iobase + RAVB_REG_ECSIPR);

	/* Recv frame limit set register */
	writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);

	return 0;
}

/* AVB-DMAC init function */
static int ravb_dmac_init(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_plat(dev);
	int mode = 0;
	u32 delay;
	bool explicit_delay = false;
	int ret;

	/* Set CONFIG mode */
	ret = ravb_reset(dev);
	if (ret)
		return ret;

	/* Disable all interrupts */
	writel(0, eth->iobase + RAVB_REG_RIC0);
	writel(0, eth->iobase + RAVB_REG_RIC1);
	writel(0, eth->iobase + RAVB_REG_RIC2);
	writel(0, eth->iobase + RAVB_REG_TIC);

	/* Set little endian */
	clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);

	/* AVB RX set */
	writel(0x18000001, eth->iobase + RAVB_REG_RCR);

	/* FIFO size set */
	writel(0x00222210, eth->iobase + RAVB_REG_TGC);

	/* Delay CLK: 2ns (not applicable on R-Car E3/D3) */
	if ((renesas_get_cpu_type() == RENESAS_CPU_TYPE_R8A77990) ||
	    (renesas_get_cpu_type() == RENESAS_CPU_TYPE_R8A77995))
		return 0;

	if (!dev_read_u32(dev, "rx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 1800, according to DT bindings */
		if (delay)
			mode |= APSR_RDM;
		explicit_delay = true;
	}

	if (!dev_read_u32(dev, "tx-internal-delay-ps", &delay)) {
		/* Valid values are 0 and 2000, according to DT bindings */
		if (delay)
			mode |= APSR_TDM;
		explicit_delay = true;
	}

	/* Fall back to deriving the delays from the RGMII phy-mode */
	if (!explicit_delay) {
		if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
			mode |= APSR_RDM;

		if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			mode |= APSR_TDM;
	}

	writel(mode, eth->iobase + RAVB_REG_APSR);

	return 0;
}
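
/*
 * Illustrative device tree fragment (not taken from a real board DT) that
 * would exercise the explicit-delay path in ravb_dmac_init(); the property
 * names and valid values follow the renesas,etheravb bindings, the node
 * label is an assumption:
 *
 *	&avb {
 *		phy-mode = "rgmii";
 *		rx-internal-delay-ps = <1800>;
 *		tx-internal-delay-ps = <2000>;
 *	};
 */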

static int ravb_config(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	struct phy_device *phy = eth->phydev;
	u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
	int ret;

	/* Configure AVB-DMAC register */
	ravb_dmac_init(dev);

	/* Configure E-MAC registers */
	ravb_mac_init(eth);
	ravb_write_hwaddr(dev);

	ret = phy_startup(phy);
	if (ret)
		return ret;

	/* Set the transfer speed */
	if (phy->speed == 100)
		writel(0, eth->iobase + RAVB_REG_GECMR);
	else if (phy->speed == 1000)
		writel(1, eth->iobase + RAVB_REG_GECMR);

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex)
		mask |= ECMR_DM;

	writel(mask, eth->iobase + RAVB_REG_ECMR);

	return 0;
}

static int ravb_start(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);
	int ret;

	ret = ravb_reset(dev);
	if (ret)
		return ret;

	ravb_base_desc_init(eth);
	ravb_tx_desc_init(eth);
	ravb_rx_desc_init(eth);

	ret = ravb_config(dev);
	if (ret)
		return ret;

	/* Setting the control will start the AVB-DMAC process. */
	writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);

	return 0;
}

static void ravb_stop(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	phy_shutdown(eth->phydev);
	ravb_reset(dev);
}

static int ravb_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct ravb_priv *eth = dev_get_priv(dev);
	struct mii_dev *mdiodev;
	void __iomem *iobase;
	int ret;

	iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
	eth->iobase = iobase;

	ret = clk_get_bulk(dev, &eth->clks);
	if (ret < 0)
		goto err_mdio_alloc;

	mdiodev = mdio_alloc();
	if (!mdiodev) {
		ret = -ENOMEM;
		goto err_mdio_alloc;
	}

	mdiodev->read = bb_miiphy_read;
	mdiodev->write = bb_miiphy_write;
	bb_miiphy_buses[0].priv = eth;
	snprintf(mdiodev->name, sizeof(mdiodev->name), dev->name);

	ret = mdio_register(mdiodev);
	if (ret < 0)
		goto err_mdio_register;

	eth->bus = miiphy_get_dev_by_name(dev->name);

	/* Bring up the Ethernet and MII clocks before touching the MAC. */
	ret = clk_enable_bulk(&eth->clks);
	if (ret)
		goto err_mdio_register;

	ret = ravb_reset(dev);
	if (ret)
		goto err_mdio_reset;

	ret = ravb_phy_config(dev);
	if (ret)
		goto err_mdio_reset;

	return 0;

err_mdio_reset:
	clk_release_bulk(&eth->clks);
err_mdio_register:
	mdio_free(mdiodev);
err_mdio_alloc:
	unmap_physmem(eth->iobase, MAP_NOCACHE);
	return ret;
}

static int ravb_remove(struct udevice *dev)
{
	struct ravb_priv *eth = dev_get_priv(dev);

	clk_release_bulk(&eth->clks);
	free(eth->phydev);
	mdio_unregister(eth->bus);
	mdio_free(eth->bus);
	unmap_physmem(eth->iobase, MAP_NOCACHE);

	return 0;
}
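
/*
 * MDIO is bit-banged through the PIR register: the callbacks below drive the
 * MDC clock and the MDIO data/direction bits, and the generic
 * bb_miiphy_read()/bb_miiphy_write() helpers hooked up in ravb_probe() use
 * them to clock out MII management frames.
 */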

int ravb_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}

int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct ravb_priv *eth = bus->priv;

	clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);

	return 0;
}

int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);

	return 0;
}

int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct ravb_priv *eth = bus->priv;

	*v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;

	return 0;
}

int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
{
	struct ravb_priv *eth = bus->priv;

	if (v)
		setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
	else
		clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);

	return 0;
}

int ravb_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}

struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "ravb",
		.init		= ravb_bb_init,
		.mdio_active	= ravb_bb_mdio_active,
		.mdio_tristate	= ravb_bb_mdio_tristate,
		.set_mdio	= ravb_bb_set_mdio,
		.get_mdio	= ravb_bb_get_mdio,
		.set_mdc	= ravb_bb_set_mdc,
		.delay		= ravb_bb_delay,
	},
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);

static const struct eth_ops ravb_ops = {
	.start			= ravb_start,
	.send			= ravb_send,
	.recv			= ravb_recv,
	.free_pkt		= ravb_free_pkt,
	.stop			= ravb_stop,
	.write_hwaddr		= ravb_write_hwaddr,
};

int ravb_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	const fdt32_t *cell;

	pdata->iobase = dev_read_addr(dev);

	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		return -EINVAL;

	pdata->max_speed = 1000;
	cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
	if (cell)
		pdata->max_speed = fdt32_to_cpu(*cell);

	sprintf(bb_miiphy_buses[0].name, dev->name);

	return 0;
}

static const struct udevice_id ravb_ids[] = {
	{ .compatible = "renesas,etheravb-rcar-gen3" },
	{ .compatible = "renesas,etheravb-rcar-gen4" },
	{ }
};

U_BOOT_DRIVER(eth_ravb) = {
	.name		= "ravb",
	.id		= UCLASS_ETH,
	.of_match	= ravb_ids,
	.of_to_plat	= ravb_of_to_plat,
	.probe		= ravb_probe,
	.remove		= ravb_remove,
	.ops		= &ravb_ops,
	.priv_auto	= sizeof(struct ravb_priv),
	.plat_auto	= sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};