1 // SPDX-License-Identifier: GPL-2.0+
4 * This file is driver for Renesas Ethernet AVB.
6 * Copyright (C) 2015-2017 Renesas Electronics Corporation
8 * Based on the SuperH Ethernet driver.
19 #include <asm/cache.h>
20 #include <linux/bitops.h>
21 #include <linux/delay.h>
22 #include <linux/mii.h>
25 #include <asm/global_data.h>
/* AVB-DMAC and E-MAC register offsets, relative to the mapped MMIO base. */
29 #define RAVB_REG_CCC 0x000
30 #define RAVB_REG_DBAT 0x004
31 #define RAVB_REG_CSR 0x00C
32 #define RAVB_REG_APSR 0x08C
33 #define RAVB_REG_RCR 0x090
34 #define RAVB_REG_TGC 0x300
35 #define RAVB_REG_TCCR 0x304
36 #define RAVB_REG_RIC0 0x360
37 #define RAVB_REG_RIC1 0x368
38 #define RAVB_REG_RIC2 0x370
39 #define RAVB_REG_TIC 0x378
40 #define RAVB_REG_ECMR 0x500
41 #define RAVB_REG_RFLR 0x508
42 #define RAVB_REG_ECSIPR 0x518
43 #define RAVB_REG_PIR 0x520
44 #define RAVB_REG_GECMR 0x5b0
45 #define RAVB_REG_MAHR 0x5c0
46 #define RAVB_REG_MALR 0x5c8
/* CCC: operating-mode request bits; CSR: operating-mode status. */
48 #define CCC_OPC_CONFIG BIT(0)
49 #define CCC_OPC_OPERATION BIT(1)
50 #define CCC_BOC BIT(20)
52 #define CSR_OPS 0x0000000F
53 #define CSR_OPS_CONFIG BIT(1)
/* APSR: RX/TX clock internal-delay enables (see ravb_dmac_init()). */
55 #define APSR_RDM BIT(13)
56 #define APSR_TDM BIT(14)
/* TCCR: transmit start request for queue 0. */
58 #define TCCR_TSRQ0 BIT(0)
/* RFLR: receive frame length limit; 0x5EE = 1518 bytes. */
60 #define RFLR_RFL_MIN 0x05EE
/* PIR: bit-banged MDIO control (MDI in, MDO out, MMD direction, MDC clock). */
62 #define PIR_MDI BIT(3)
63 #define PIR_MDO BIT(2)
64 #define PIR_MMD BIT(1)
65 #define PIR_MDC BIT(0)
/* ECMR: E-MAC mode — RX/TX enable, duplex, flow-control related bits. */
67 #define ECMR_TRCCM BIT(26)
68 #define ECMR_RZPF BIT(20)
69 #define ECMR_PFR BIT(18)
70 #define ECMR_RXF BIT(17)
71 #define ECMR_RE BIT(6)
72 #define ECMR_TE BIT(5)
73 #define ECMR_DM BIT(1)
74 #define ECMR_CHG_DM (ECMR_TRCCM | ECMR_RZPF | ECMR_PFR | ECMR_RXF)
/* Ring sizes: base descriptor table and per-queue TX/RX rings. */
77 #define RAVB_NUM_BASE_DESC 16
78 #define RAVB_NUM_TX_DESC 8
79 #define RAVB_NUM_RX_DESC 8
/* Slots in the base descriptor table used for the TX and RX queues. */
81 #define RAVB_TX_QUEUE_OFFSET 0
82 #define RAVB_RX_QUEUE_OFFSET 4
/* Descriptor type (DT) field, bits 31:28 of the control word. */
84 #define RAVB_DESC_DT(n) ((n) << 28)
85 #define RAVB_DESC_DT_FSINGLE RAVB_DESC_DT(0x7)
86 #define RAVB_DESC_DT_LINKFIX RAVB_DESC_DT(0x9)
87 #define RAVB_DESC_DT_EOS RAVB_DESC_DT(0xa)
88 #define RAVB_DESC_DT_FEMPTY RAVB_DESC_DT(0xc)
89 #define RAVB_DESC_DT_EEMPTY RAVB_DESC_DT(0x3)
90 #define RAVB_DESC_DT_MASK RAVB_DESC_DT(0xf)
/* Descriptor size (DS) field, low 12 bits = frame/buffer length. */
92 #define RAVB_DESC_DS(n) (((n) & 0xfff) << 0)
93 #define RAVB_DESC_DS_MASK 0xfff
/* RX descriptor MAC status (MSC) bits, 23:16 of the control word. */
95 #define RAVB_RX_DESC_MSC_MC BIT(23)
96 #define RAVB_RX_DESC_MSC_CEEF BIT(22)
97 #define RAVB_RX_DESC_MSC_CRL BIT(21)
98 #define RAVB_RX_DESC_MSC_FRE BIT(20)
99 #define RAVB_RX_DESC_MSC_RTLF BIT(19)
100 #define RAVB_RX_DESC_MSC_RTSF BIT(18)
101 #define RAVB_RX_DESC_MSC_RFE BIT(17)
102 #define RAVB_RX_DESC_MSC_CRC BIT(16)
103 #define RAVB_RX_DESC_MSC_MASK (0xff << 16)
/* MSC bits that mark a received frame as bad (dropped by ravb_recv()). */
105 #define RAVB_RX_DESC_MSC_RX_ERR_MASK \
106 (RAVB_RX_DESC_MSC_CRC | RAVB_RX_DESC_MSC_RFE | RAVB_RX_DESC_MSC_RTLF | \
107 RAVB_RX_DESC_MSC_RTSF | RAVB_RX_DESC_MSC_CEEF)
/* Busy-wait budget for a single TX completion, in milliseconds. */
109 #define RAVB_TX_TIMEOUT_MS 1000
/*
 * NOTE(review): the struct headers are elided in this excerpt.  The fields
 * below appear to belong to two types: an RX descriptor pair ("ravb_rxdesc":
 * data descriptor + link descriptor + receive buffer) and the driver
 * private state ("ravb_priv") — confirm against the full file.
 */
117 struct ravb_desc data;
118 struct ravb_desc link;
/* Per-descriptor receive buffer, PKTSIZE_ALIGN bytes. */
120 u8 packet[PKTSIZE_ALIGN];
/* Descriptor base address table plus the TX and RX rings (DMA-visible). */
124 struct ravb_desc base_desc[RAVB_NUM_BASE_DESC];
125 struct ravb_desc tx_desc[RAVB_NUM_TX_DESC];
126 struct ravb_rxdesc rx_desc[RAVB_NUM_RX_DESC];
130 struct phy_device *phydev;
/* Mapped controller registers (set in ravb_probe()). */
132 void __iomem *iobase;
133 struct clk_bulk clks;
/* Optional PHY reset GPIO, requested in ravb_probe(). */
134 struct gpio_desc reset_gpio;
/* Write back a CPU-cached range so the DMAC sees current data
 * (thin wrapper over flush_dcache_range; body partially elided). */
137 static inline void ravb_flush_dcache(u32 addr, u32 len)
139 flush_dcache_range(addr, addr + len);
/* Invalidate a range before the CPU reads DMA-written data; start and end
 * are widened to ARCH_DMA_MINALIGN so partial cache lines are covered. */
142 static inline void ravb_invalidate_dcache(u32 addr, u32 len)
144 u32 start = addr & ~((uintptr_t)ARCH_DMA_MINALIGN - 1);
145 u32 end = roundup(addr + len, ARCH_DMA_MINALIGN);
146 invalidate_dcache_range(start, end);
/*
 * .send hook: queue one frame on TX queue 0 as a single FSINGLE descriptor
 * and busy-wait (up to RAVB_TX_TIMEOUT_MS) until the DMAC consumes it.
 * NOTE(review): "ð->" below is apparent mojibake for "&eth->" (HTML
 * entity "&eth;"); loop break/return and braces are elided in this excerpt.
 */
149 static int ravb_send(struct udevice *dev, void *packet, int len)
151 struct ravb_priv *eth = dev_get_priv(dev);
152 struct ravb_desc *desc = ð->tx_desc[eth->tx_desc_idx];
155 /* Update TX descriptor */
/* Flush the payload so the DMA engine reads current bytes. */
156 ravb_flush_dcache((uintptr_t)packet, len);
157 memset(desc, 0x0, sizeof(*desc));
158 desc->ctrl = RAVB_DESC_DT_FSINGLE | RAVB_DESC_DS(len);
159 desc->dptr = (uintptr_t)packet;
160 ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));
162 /* Restart the transmitter if disabled */
163 if (!(readl(eth->iobase + RAVB_REG_TCCR) & TCCR_TSRQ0))
164 setbits_le32(eth->iobase + RAVB_REG_TCCR, TCCR_TSRQ0);
166 /* Wait until packet is transmitted */
167 start = get_timer(0);
168 while (get_timer(start) < RAVB_TX_TIMEOUT_MS) {
/* Re-read the descriptor: the DMAC rewrites the DT field on completion. */
169 ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
170 if ((desc->ctrl & RAVB_DESC_DT_MASK) != RAVB_DESC_DT_FSINGLE)
175 if (get_timer(start) >= RAVB_TX_TIMEOUT_MS)
/* Wrap one early: the last ring entry is the LINKFIX descriptor. */
178 eth->tx_desc_idx = (eth->tx_desc_idx + 1) % (RAVB_NUM_TX_DESC - 1)
/*
 * .recv hook: poll the current RX descriptor; if a frame completed, hand
 * back a pointer into the descriptor's buffer and its length.  Frames with
 * MSC error bits set are recycled.
 * NOTE(review): "ð->" is apparent mojibake for "&eth->"; early returns
 * and braces are elided in this excerpt.
 */
182 static int ravb_recv(struct udevice *dev, int flags, uchar **packetp)
184 struct ravb_priv *eth = dev_get_priv(dev);
185 struct ravb_rxdesc *desc = ð->rx_desc[eth->rx_desc_idx];
189 /* Check if the rx descriptor is ready */
190 ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
/* FEMPTY means the DMAC has not filled this descriptor yet. */
191 if ((desc->data.ctrl & RAVB_DESC_DT_MASK) == RAVB_DESC_DT_FEMPTY)
194 /* Check for errors */
195 if (desc->data.ctrl & RAVB_RX_DESC_MSC_RX_ERR_MASK) {
196 desc->data.ctrl &= ~RAVB_RX_DESC_MSC_MASK;
/* Frame length is the low 12 bits of the control word. */
200 len = desc->data.ctrl & RAVB_DESC_DS_MASK;
201 packet = (u8 *)(uintptr_t)desc->data.dptr;
/* Invalidate the buffer so the CPU reads the DMA-written frame. */
202 ravb_invalidate_dcache((uintptr_t)packet, len);
/*
 * .free_pkt hook: return the just-consumed RX descriptor to the DMAC
 * (FEMPTY, full buffer size) and advance to — and invalidate — the next
 * ring entry.  NOTE(review): "ð->" is apparent mojibake for "&eth->".
 */
208 static int ravb_free_pkt(struct udevice *dev, uchar *packet, int length)
210 struct ravb_priv *eth = dev_get_priv(dev);
211 struct ravb_rxdesc *desc = ð->rx_desc[eth->rx_desc_idx];
213 /* Make current descriptor available again */
214 desc->data.ctrl = RAVB_DESC_DT_FEMPTY | RAVB_DESC_DS(PKTSIZE_ALIGN);
215 ravb_flush_dcache((uintptr_t)desc, sizeof(*desc));
217 /* Point to the next descriptor */
218 eth->rx_desc_idx = (eth->rx_desc_idx + 1) % RAVB_NUM_RX_DESC;
219 desc = ð->rx_desc[eth->rx_desc_idx];
220 ravb_invalidate_dcache((uintptr_t)desc, sizeof(*desc));
/* Request CONFIG mode in CCC and wait (100 ms budget) for CSR to confirm
 * the mode switch; returns wait_for_bit_le32()'s result. */
225 static int ravb_reset(struct udevice *dev)
227 struct ravb_priv *eth = dev_get_priv(dev);
229 /* Set config mode */
230 writel(CCC_OPC_CONFIG, eth->iobase + RAVB_REG_CCC);
232 /* Check the operating mode is changed to the config mode. */
233 return wait_for_bit_le32(eth->iobase + RAVB_REG_CSR,
234 CSR_OPS_CONFIG, true, 100, true);
/* Build the descriptor base address table (every slot EOS), flush it,
 * and point the controller's DBAT register at it. */
237 static void ravb_base_desc_init(struct ravb_priv *eth)
239 const u32 desc_size = RAVB_NUM_BASE_DESC * sizeof(struct ravb_desc);
242 /* Initialize all descriptors */
243 memset(eth->base_desc, 0x0, desc_size);
245 for (i = 0; i < RAVB_NUM_BASE_DESC; i++)
246 eth->base_desc[i].ctrl = RAVB_DESC_DT_EOS;
248 ravb_flush_dcache((uintptr_t)eth->base_desc, desc_size);
250 /* Register the descriptor base address table */
251 writel((uintptr_t)eth->base_desc, eth->iobase + RAVB_REG_DBAT);
/*
 * Build the TX ring: all entries EEMPTY, the last one a LINKFIX back to
 * the ring head, then hook the ring into the base table's TX queue slot.
 * NOTE(review): "ð->" is apparent mojibake for "&eth->".
 */
254 static void ravb_tx_desc_init(struct ravb_priv *eth)
256 const u32 desc_size = RAVB_NUM_TX_DESC * sizeof(struct ravb_desc);
259 /* Initialize all descriptors */
260 memset(eth->tx_desc, 0x0, desc_size);
261 eth->tx_desc_idx = 0;
263 for (i = 0; i < RAVB_NUM_TX_DESC; i++)
264 eth->tx_desc[i].ctrl = RAVB_DESC_DT_EEMPTY;
266 /* Mark the end of the descriptors */
267 eth->tx_desc[RAVB_NUM_TX_DESC - 1].ctrl = RAVB_DESC_DT_LINKFIX;
268 eth->tx_desc[RAVB_NUM_TX_DESC - 1].dptr = (uintptr_t)eth->tx_desc;
269 ravb_flush_dcache((uintptr_t)eth->tx_desc, desc_size);
271 /* Point the controller to the TX descriptor list. */
272 eth->base_desc[RAVB_TX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
273 eth->base_desc[RAVB_TX_QUEUE_OFFSET].dptr = (uintptr_t)eth->tx_desc;
274 ravb_flush_dcache((uintptr_t)ð->base_desc[RAVB_TX_QUEUE_OFFSET],
275 sizeof(struct ravb_desc));
/*
 * Build the RX ring: each entry is a data descriptor (EEMPTY, pointing at
 * its own packet buffer) chained by a LINKFIX link descriptor to the next
 * entry; the last link points back to the head.  Then hook the ring into
 * the base table's RX queue slot.
 * NOTE(review): "ð->" is apparent mojibake for "&eth->".
 */
278 static void ravb_rx_desc_init(struct ravb_priv *eth)
280 const u32 desc_size = RAVB_NUM_RX_DESC * sizeof(struct ravb_rxdesc);
283 /* Initialize all descriptors */
284 memset(eth->rx_desc, 0x0, desc_size);
285 eth->rx_desc_idx = 0;
287 for (i = 0; i < RAVB_NUM_RX_DESC; i++) {
288 eth->rx_desc[i].data.ctrl = RAVB_DESC_DT_EEMPTY |
289 RAVB_DESC_DS(PKTSIZE_ALIGN);
290 eth->rx_desc[i].data.dptr = (uintptr_t)eth->rx_desc[i].packet;
292 eth->rx_desc[i].link.ctrl = RAVB_DESC_DT_LINKFIX;
293 eth->rx_desc[i].link.dptr = (uintptr_t)ð->rx_desc[i + 1];
296 /* Mark the end of the descriptors */
297 eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.ctrl = RAVB_DESC_DT_LINKFIX;
298 eth->rx_desc[RAVB_NUM_RX_DESC - 1].link.dptr = (uintptr_t)eth->rx_desc;
299 ravb_flush_dcache((uintptr_t)eth->rx_desc, desc_size);
301 /* Point the controller to the rx descriptor list */
302 eth->base_desc[RAVB_RX_QUEUE_OFFSET].ctrl = RAVB_DESC_DT_LINKFIX;
303 eth->base_desc[RAVB_RX_QUEUE_OFFSET].dptr = (uintptr_t)eth->rx_desc;
304 ravb_flush_dcache((uintptr_t)ð->base_desc[RAVB_RX_QUEUE_OFFSET],
305 sizeof(struct ravb_desc));
/*
 * Locate and connect the PHY: pulse the optional reset GPIO, find the PHY
 * on the bitbang MDIO bus, trim the advertised mode mask, and — when DT
 * limits max_speed below 1000 — also clear the 1000BASE-T advertisement
 * bits (9:8) in MII_CTRL1000 on the PHY itself.
 * NOTE(review): "ð->" is apparent mojibake for "&eth->"; delays, error
 * checks and braces are elided in this excerpt.
 */
308 static int ravb_phy_config(struct udevice *dev)
310 struct ravb_priv *eth = dev_get_priv(dev);
311 struct eth_pdata *pdata = dev_get_plat(dev);
312 struct phy_device *phydev;
313 int mask = 0xffffffff, reg;
315 if (dm_gpio_is_valid(ð->reset_gpio)) {
316 dm_gpio_set_value(ð->reset_gpio, 1);
318 dm_gpio_set_value(ð->reset_gpio, 0);
322 phydev = phy_find_by_mask(eth->bus, mask, pdata->phy_interface);
326 phy_connect_dev(phydev, dev);
328 eth->phydev = phydev;
330 phydev->supported &= SUPPORTED_100baseT_Full |
331 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
332 SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_Pause |
333 SUPPORTED_Asym_Pause;
335 if (pdata->max_speed != 1000) {
336 phydev->supported &= ~SUPPORTED_1000baseT_Full;
337 reg = phy_read(phydev, -1, MII_CTRL1000);
338 reg &= ~(BIT(9) | BIT(8));
339 phy_write(phydev, -1, MII_CTRL1000, reg);
347 /* Set Mac address */
/* Program MAHR (first 4 bytes) and MALR (last 2 bytes) from
 * pdata->enetaddr, most-significant byte first. */
348 static int ravb_write_hwaddr(struct udevice *dev)
350 struct ravb_priv *eth = dev_get_priv(dev);
351 struct eth_pdata *pdata = dev_get_plat(dev);
352 unsigned char *mac = pdata->enetaddr;
354 writel((mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3],
355 eth->iobase + RAVB_REG_MAHR);
357 writel((mac[4] << 8) | mac[5], eth->iobase + RAVB_REG_MALR);
362 /* E-MAC init function */
363 static int ravb_mac_init(struct ravb_priv *eth)
365 /* Disable MAC Interrupt */
366 writel(0, eth->iobase + RAVB_REG_ECSIPR);
368 /* Recv frame limit set register */
/* RFLR_RFL_MIN = 0x5EE = 1518 bytes (standard Ethernet frame). */
369 writel(RFLR_RFL_MIN, eth->iobase + RAVB_REG_RFLR);
374 /* AVB-DMAC init function */
/*
 * Reset to CONFIG mode, mask all interrupts, select little-endian
 * descriptors, program RCR/TGC, then choose the RGMII internal RX/TX
 * clock delays (APSR.RDM/TDM): explicit DT "…-internal-delay-ps"
 * properties win; otherwise fall back to the phy-mode suffix.
 * NOTE(review): several lines (returns, delay-setting branches, braces)
 * are elided in this excerpt.
 */
375 static int ravb_dmac_init(struct udevice *dev)
377 struct ravb_priv *eth = dev_get_priv(dev);
378 struct eth_pdata *pdata = dev_get_plat(dev);
382 bool explicit_delay = false;
384 /* Set CONFIG mode */
385 ret = ravb_reset(dev);
389 /* Disable all interrupts */
390 writel(0, eth->iobase + RAVB_REG_RIC0);
391 writel(0, eth->iobase + RAVB_REG_RIC1);
392 writel(0, eth->iobase + RAVB_REG_RIC2);
393 writel(0, eth->iobase + RAVB_REG_TIC);
395 /* Set little endian */
396 clrbits_le32(eth->iobase + RAVB_REG_CCC, CCC_BOC);
/* NOTE(review): magic RCR/TGC values — presumably RX queue and TX gate
 * configuration; verify against the R-Car Gen3 hardware manual. */
399 writel(0x18000001, eth->iobase + RAVB_REG_RCR);
402 writel(0x00222210, eth->iobase + RAVB_REG_TGC);
404 /* Delay CLK: 2ns (not applicable on R-Car E3/D3) */
405 if ((rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77990) ||
406 (rmobile_get_cpu_type() == RMOBILE_CPU_TYPE_R8A77995))
409 if (!dev_read_u32(dev, "rx-internal-delay-ps", &delay)) {
410 /* Valid values are 0 and 1800, according to DT bindings */
413 explicit_delay = true;
417 if (!dev_read_u32(dev, "tx-internal-delay-ps", &delay)) {
418 /* Valid values are 0 and 2000, according to DT bindings */
421 explicit_delay = true;
/* No explicit DT delay: derive it from the RGMII phy-mode variant. */
425 if (!explicit_delay) {
426 if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
427 pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
430 if (pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
431 pdata->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
435 writel(mode, eth->iobase + RAVB_REG_APSR);
/*
 * Late configuration: program the MAC address, start the PHY (autoneg),
 * select the E-MAC speed via GECMR (0 = 100 Mbit/s, 1 = 1 Gbit/s) and
 * finally enable RX/TX through ECMR.  Duplex-mode handling and error
 * checks are elided in this excerpt.
 */
440 static int ravb_config(struct udevice *dev)
442 struct ravb_priv *eth = dev_get_priv(dev);
443 struct phy_device *phy = eth->phydev;
444 u32 mask = ECMR_CHG_DM | ECMR_RE | ECMR_TE;
447 /* Configure AVB-DMAC register */
450 /* Configure E-MAC registers */
452 ravb_write_hwaddr(dev);
454 ret = phy_startup(phy);
458 /* Set the transfer speed */
459 if (phy->speed == 100)
460 writel(0, eth->iobase + RAVB_REG_GECMR);
461 else if (phy->speed == 1000)
462 writel(1, eth->iobase + RAVB_REG_GECMR);
464 /* Check if full duplex mode is supported by the phy */
468 writel(mask, eth->iobase + RAVB_REG_ECMR);
/* .start hook: reset the DMAC, rebuild all descriptor rings, run the
 * MAC/PHY configuration, then switch to OPERATION mode (starts DMA). */
473 static int ravb_start(struct udevice *dev)
475 struct ravb_priv *eth = dev_get_priv(dev);
478 ret = ravb_reset(dev);
482 ravb_base_desc_init(eth);
483 ravb_tx_desc_init(eth);
484 ravb_rx_desc_init(eth);
486 ret = ravb_config(dev);
490 /* Setting the control will start the AVB-DMAC process. */
491 writel(CCC_OPC_OPERATION, eth->iobase + RAVB_REG_CCC);
/* .stop hook: shut the PHY down (any DMAC reset is elided in this excerpt). */
496 static void ravb_stop(struct udevice *dev)
498 struct ravb_priv *eth = dev_get_priv(dev);
500 phy_shutdown(eth->phydev);
504 static int ravb_probe(struct udevice *dev)
506 struct eth_pdata *pdata = dev_get_plat(dev);
507 struct ravb_priv *eth = dev_get_priv(dev);
508 struct ofnode_phandle_args phandle_args;
509 struct mii_dev *mdiodev;
510 void __iomem *iobase;
513 iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
514 eth->iobase = iobase;
516 ret = clk_get_bulk(dev, ð->clks);
520 ret = dev_read_phandle_with_args(dev, "phy-handle", NULL, 0, 0, &phandle_args);
522 gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
523 ð->reset_gpio, GPIOD_IS_OUT);
526 if (!dm_gpio_is_valid(ð->reset_gpio)) {
527 gpio_request_by_name(dev, "reset-gpios", 0, ð->reset_gpio,
531 mdiodev = mdio_alloc();
537 mdiodev->read = bb_miiphy_read;
538 mdiodev->write = bb_miiphy_write;
539 bb_miiphy_buses[0].priv = eth;
540 snprintf(mdiodev->name, sizeof(mdiodev->name), dev->name);
542 ret = mdio_register(mdiodev);
544 goto err_mdio_register;
546 eth->bus = miiphy_get_dev_by_name(dev->name);
549 ret = clk_enable_bulk(ð->clks);
551 goto err_mdio_register;
553 ret = ravb_reset(dev);
557 ret = ravb_phy_config(dev);
564 clk_release_bulk(ð->clks);
568 unmap_physmem(eth->iobase, MAP_NOCACHE);
/*
 * .remove hook: release clocks, unregister the MDIO bus, free the optional
 * PHY reset GPIO and unmap the MMIO window.
 * NOTE(review): "ð->" is apparent mojibake for "&eth->".
 */
572 static int ravb_remove(struct udevice *dev)
574 struct ravb_priv *eth = dev_get_priv(dev);
576 clk_release_bulk(ð->clks);
579 mdio_unregister(eth->bus);
581 if (dm_gpio_is_valid(ð->reset_gpio))
582 dm_gpio_free(dev, ð->reset_gpio);
583 unmap_physmem(eth->iobase, MAP_NOCACHE);
/* Bitbang MDIO init hook; its body is elided in this excerpt. */
588 int ravb_bb_init(struct bb_miiphy_bus *bus)
/* Drive the MDIO line as an output: set PIR.MMD. */
593 int ravb_bb_mdio_active(struct bb_miiphy_bus *bus)
595 struct ravb_priv *eth = bus->priv;
597 setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);
/* Tristate the MDIO line (input for reading): clear PIR.MMD. */
602 int ravb_bb_mdio_tristate(struct bb_miiphy_bus *bus)
604 struct ravb_priv *eth = bus->priv;
606 clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MMD);
/* Set the MDIO output level: v != 0 drives MDO high, otherwise low
 * (the if/else around the two branches is elided in this excerpt). */
611 int ravb_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
613 struct ravb_priv *eth = bus->priv;
616 setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
618 clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDO);
/* Sample the MDIO input: extract PIR.MDI (bit 3) into *v as 0 or 1. */
623 int ravb_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
625 struct ravb_priv *eth = bus->priv;
627 *v = (readl(eth->iobase + RAVB_REG_PIR) & PIR_MDI) >> 3;
/* Set the MDC clock level: v != 0 drives MDC high, otherwise low
 * (the if/else around the two branches is elided in this excerpt). */
632 int ravb_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
634 struct ravb_priv *eth = bus->priv;
637 setbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
639 clrbits_le32(eth->iobase + RAVB_REG_PIR, PIR_MDC);
/* Half-bit-time delay between MDC edges; body elided in this excerpt. */
644 int ravb_bb_delay(struct bb_miiphy_bus *bus)
/* Single bitbang MDIO bus; .priv and .name are filled at probe /
 * of_to_plat time, the callbacks are the ravb_bb_* helpers above. */
651 struct bb_miiphy_bus bb_miiphy_buses[] = {
654 .init = ravb_bb_init,
655 .mdio_active = ravb_bb_mdio_active,
656 .mdio_tristate = ravb_bb_mdio_tristate,
657 .set_mdio = ravb_bb_set_mdio,
658 .get_mdio = ravb_bb_get_mdio,
659 .set_mdc = ravb_bb_set_mdc,
660 .delay = ravb_bb_delay,
663 int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);
/* DM Ethernet ops; the .start/.send/.recv/.stop entries are elided in
 * this excerpt. */
665 static const struct eth_ops ravb_ops = {
669 .free_pkt = ravb_free_pkt,
671 .write_hwaddr = ravb_write_hwaddr,
674 int ravb_of_to_plat(struct udevice *dev)
676 struct eth_pdata *pdata = dev_get_plat(dev);
679 pdata->iobase = dev_read_addr(dev);
681 pdata->phy_interface = dev_read_phy_mode(dev);
682 if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
685 pdata->max_speed = 1000;
686 cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
688 pdata->max_speed = fdt32_to_cpu(*cell);
690 sprintf(bb_miiphy_buses[0].name, dev->name);
/* Compatible strings: per-SoC R-Car Gen3 entries plus the generic
 * "etheravb-rcar-gen3" fallback. */
695 static const struct udevice_id ravb_ids[] = {
696 { .compatible = "renesas,etheravb-r8a7795" },
697 { .compatible = "renesas,etheravb-r8a7796" },
698 { .compatible = "renesas,etheravb-r8a77965" },
699 { .compatible = "renesas,etheravb-r8a77970" },
700 { .compatible = "renesas,etheravb-r8a77990" },
701 { .compatible = "renesas,etheravb-r8a77995" },
702 { .compatible = "renesas,etheravb-rcar-gen3" },
/* Driver-model registration.  DM_FLAG_ALLOC_PRIV_DMA: the private data
 * holds the descriptor rings, so it must come from DMA-capable memory. */
706 U_BOOT_DRIVER(eth_ravb) = {
709 .of_match = ravb_ids,
710 .of_to_plat = ravb_of_to_plat,
712 .remove = ravb_remove,
714 .priv_auto = sizeof(struct ravb_priv),
715 .plat_auto = sizeof(struct eth_pdata),
716 .flags = DM_FLAG_ALLOC_PRIV_DMA,