1 // SPDX-License-Identifier: GPL-2.0+
4 * Copyright (C) 2020 Cortina Access Inc.
7 * Ethernet MAC Driver for all supported CAxxxx SoCs
15 #include <linux/delay.h>
16 #include <linux/bitops.h>
17 #include <u-boot/crc.h>
20 #include "cortina_ni.h"
22 #define HEADER_A_SIZE 8
/* Cached handle of the active NIC udevice; used by helper functions
 * (ca_mdio_*, ca_ni_*) that take no device argument.
 * NOTE(review): assignment site is not visible in this listing —
 * presumably set during probe; confirm against the full source.
 */
39 static struct udevice *curr_dev;
41 static u32 *ca_rdwrptr_adv_one(u32 *x, unsigned long base, unsigned long max)
43 if (x + 1 >= (u32 *)max)
49 static void ca_reg_read(void *reg, u64 base, u64 offset)
51 u32 *val = (u32 *)reg;
53 *val = readl(KSEG1_ATU_XLAT(base + offset));
56 static void ca_reg_write(void *reg, u64 base, u64 offset)
58 u32 val = *(u32 *)reg;
60 writel(val, KSEG1_ATU_XLAT(base + offset));
/*
 * Write @data to register @offset of the RGMII-connected PHY at MDIO
 * address @addr using the PER_MDIO controller, then poll for completion.
 * Falls through to a timeout message after __MDIO_ACCESS_TIMEOUT polls.
 * NOTE(review): the do{ opener and the success/timeout return statements
 * are not visible in this listing — confirm against the full source.
 */
63 static int ca_mdio_write_rgmii(u32 addr, u32 offset, u16 data)
65 /* up to 10000 cycles*/
66 u32 loop_wait = __MDIO_ACCESS_TIMEOUT;
67 struct PER_MDIO_ADDR_t mdio_addr;
68 struct PER_MDIO_CTRL_t mdio_ctrl;
69 struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
/* Program target PHY address/register and mark the access as a write */
71 memset(&mdio_addr, 0, sizeof(mdio_addr));
72 mdio_addr.mdio_addr = addr;
73 mdio_addr.mdio_offset = offset;
74 mdio_addr.mdio_rd_wr = __MDIO_WR_FLAG;
75 ca_reg_write(&mdio_addr, (u64)priv->per_mdio_base_addr,
76 PER_MDIO_ADDR_OFFSET);
77 ca_reg_write(&data, (u64)priv->per_mdio_base_addr,
78 PER_MDIO_WRDATA_OFFSET);
/* Kick off the MDIO transaction */
80 memset(&mdio_ctrl, 0, sizeof(mdio_ctrl));
81 mdio_ctrl.mdiostart = 1;
82 ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
83 PER_MDIO_CTRL_OFFSET);
85 debug("%s: phy_addr=%d, offset=%d, data=0x%x\n",
86 __func__, addr, offset, data);
/* Poll the done bit; writing the register back clears the status */
89 ca_reg_read(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
90 PER_MDIO_CTRL_OFFSET);
91 if (mdio_ctrl.mdiodone) {
92 ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
93 PER_MDIO_CTRL_OFFSET);
96 } while (--loop_wait);
98 printf("CA NI %s: PHY write timeout!!!\n", __func__);
102 int ca_mdio_write(u32 addr, u32 offset, u16 data)
104 u32 reg_addr, reg_val;
105 struct NI_MDIO_OPER_T mdio_oper;
107 /* support range: 1~31*/
108 if (addr < CA_MDIO_ADDR_MIN || addr > CA_MDIO_ADDR_MAX)
111 /* the phy addr 5 is connect to RGMII */
113 return ca_mdio_write_rgmii(addr, offset, data);
115 memset(&mdio_oper, 0, sizeof(mdio_oper));
116 mdio_oper.reg_off = offset;
117 mdio_oper.phy_addr = addr;
118 mdio_oper.reg_base = CA_NI_MDIO_REG_BASE;
120 memcpy(®_addr, &mdio_oper, sizeof(reg_addr));
121 ca_reg_write(®_val, (u64)reg_addr, 0);
/*
 * Read register @offset of the RGMII-connected PHY at MDIO address
 * @addr via the PER_MDIO controller; on completion the result is stored
 * through @data. Polls up to __MDIO_ACCESS_TIMEOUT times before
 * printing a timeout message.
 * NOTE(review): the do{ opener and the success/timeout return statements
 * are not visible in this listing — confirm against the full source.
 */
126 static int ca_mdio_read_rgmii(u32 addr, u32 offset, u16 *data)
128 u32 loop_wait = __MDIO_ACCESS_TIMEOUT;
129 struct PER_MDIO_ADDR_t mdio_addr;
130 struct PER_MDIO_CTRL_t mdio_ctrl;
131 struct PER_MDIO_RDDATA_t read_data;
132 struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
/* Program target PHY address/register and mark the access as a read */
134 memset(&mdio_addr, 0, sizeof(mdio_addr));
135 mdio_addr.mdio_addr = addr;
136 mdio_addr.mdio_offset = offset;
137 mdio_addr.mdio_rd_wr = __MDIO_RD_FLAG;
138 ca_reg_write(&mdio_addr, (u64)priv->per_mdio_base_addr,
139 PER_MDIO_ADDR_OFFSET);
/* Kick off the MDIO transaction */
141 memset(&mdio_ctrl, 0, sizeof(mdio_ctrl));
142 mdio_ctrl.mdiostart = 1;
143 ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
144 PER_MDIO_CTRL_OFFSET);
/* Poll done; write-back clears status, then latch the read data */
147 ca_reg_read(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
148 PER_MDIO_CTRL_OFFSET);
149 if (mdio_ctrl.mdiodone) {
150 ca_reg_write(&mdio_ctrl, (u64)priv->per_mdio_base_addr,
151 PER_MDIO_CTRL_OFFSET);
152 ca_reg_read(&read_data, (u64)priv->per_mdio_base_addr,
153 PER_MDIO_RDDATA_OFFSET);
154 *data = read_data.mdio_rddata;
157 } while (--loop_wait);
159 printf("CA NI %s: TIMEOUT!!\n", __func__);
163 int ca_mdio_read(u32 addr, u32 offset, u16 *data)
165 u32 reg_addr, reg_val;
166 struct NI_MDIO_OPER_T mdio_oper;
171 /* support range: 1~31*/
172 if (addr < CA_MDIO_ADDR_MIN || addr > CA_MDIO_ADDR_MAX)
175 /* the phy addr 5 is connect to RGMII */
177 return ca_mdio_read_rgmii(addr, offset, data);
179 memset(&mdio_oper, 0, sizeof(mdio_oper));
180 mdio_oper.reg_off = offset;
181 mdio_oper.phy_addr = addr;
182 mdio_oper.reg_base = CA_NI_MDIO_REG_BASE;
184 memcpy(®_addr, &mdio_oper, sizeof(reg_addr));
185 ca_reg_read(®_val, (u64)reg_addr, 0);
190 int ca_miiphy_read(const char *devname, u8 addr, u8 reg, u16 *value)
192 return ca_mdio_read(addr, reg, value);
195 int ca_miiphy_write(const char *devname, u8 addr, u8 reg, u16 value)
197 return ca_mdio_write(addr, reg, value);
200 static int cortina_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
204 ca_mdio_read(addr, reg, &data);
208 static int cortina_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
211 return ca_mdio_write(addr, reg, val);
/*
 * Program the station MAC address from the "ethaddr" environment
 * variable into the NI registers, and configure the XRAM RX filter:
 * address-match/broadcast only when ethaddr is set, promiscuous
 * otherwise. NOTE(review): the mac[] buffer declaration and the else
 * branch opener are not visible in this listing.
 */
214 static void ca_ni_setup_mac_addr(void)
217 struct NI_HV_GLB_MAC_ADDR_CFG0_t mac_addr_cfg0;
218 struct NI_HV_GLB_MAC_ADDR_CFG1_t mac_addr_cfg1;
219 struct NI_HV_PT_PORT_STATIC_CFG_t port_static_cfg;
220 struct NI_HV_XRAM_CPUXRAM_CFG_t cpuxram_cfg;
221 struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
223 /* parsing ethaddr and set to NI registers. */
224 if (eth_env_get_enetaddr("ethaddr", mac)) {
225 /* The complete MAC address consists of
226 * {MAC_ADDR0_mac_addr0[0-3], MAC_ADDR1_mac_addr1[4],
227 * PT_PORT_STATIC_CFG_mac_addr6[5]}.
/* Bytes 0-3, big-endian packed into one 32-bit register */
229 mac_addr_cfg0.mac_addr0 = (mac[0] << 24) + (mac[1] << 16) +
230 (mac[2] << 8) + mac[3];
231 ca_reg_write(&mac_addr_cfg0, (u64)priv->ni_hv_base_addr,
232 NI_HV_GLB_MAC_ADDR_CFG0_OFFSET);
/* Byte 4 */
234 memset(&mac_addr_cfg1, 0, sizeof(mac_addr_cfg1));
235 mac_addr_cfg1.mac_addr1 = mac[4];
236 ca_reg_write(&mac_addr_cfg1, (u64)priv->ni_hv_base_addr,
237 NI_HV_GLB_MAC_ADDR_CFG1_OFFSET);
/* Byte 5 lives in the per-port static config register */
239 ca_reg_read(&port_static_cfg, (u64)priv->ni_hv_base_addr,
240 NI_HV_PT_PORT_STATIC_CFG_OFFSET +
241 (APB0_NI_HV_PT_STRIDE * priv->active_port));
243 port_static_cfg.mac_addr6 = mac[5];
244 ca_reg_write(&port_static_cfg, (u64)priv->ni_hv_base_addr,
245 NI_HV_PT_PORT_STATIC_CFG_OFFSET +
246 (APB0_NI_HV_PT_STRIDE * priv->active_port));
248 /* received only Broadcast and Address matched packets */
249 ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
250 NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
251 cpuxram_cfg.xram_mgmt_promisc_mode = 0;
252 cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
253 cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
254 ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
255 NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
257 /* received all packets(promiscuous mode) */
258 ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
259 NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
260 cpuxram_cfg.xram_mgmt_promisc_mode = 3;
261 cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
262 cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
263 ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
264 NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
/*
 * Enable the MAC TX and RX paths of the active port via read-modify-
 * write of the per-port RXMAC/TXMAC config registers.
 * NOTE(review): no enable-bit assignment appears between each read and
 * write in this listing — the rx_en/tx_en set lines are presumably
 * missing from the capture; confirm against the full source.
 */
268 static void ca_ni_enable_tx_rx(void)
270 struct NI_HV_PT_RXMAC_CFG_t rxmac_cfg;
271 struct NI_HV_PT_TXMAC_CFG_t txmac_cfg;
272 struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
274 /* Enable TX and RX functions */
275 ca_reg_read(&rxmac_cfg, (u64)priv->ni_hv_base_addr,
276 NI_HV_PT_RXMAC_CFG_OFFSET +
277 (APB0_NI_HV_PT_STRIDE * priv->active_port));
279 ca_reg_write(&rxmac_cfg, (u64)priv->ni_hv_base_addr,
280 NI_HV_PT_RXMAC_CFG_OFFSET +
281 (APB0_NI_HV_PT_STRIDE * priv->active_port));
283 ca_reg_read(&txmac_cfg, (u64)priv->ni_hv_base_addr,
284 NI_HV_PT_TXMAC_CFG_OFFSET +
285 (APB0_NI_HV_PT_STRIDE * priv->active_port));
287 ca_reg_write(&txmac_cfg, (u64)priv->ni_hv_base_addr,
288 NI_HV_PT_TXMAC_CFG_OFFSET +
289 (APB0_NI_HV_PT_STRIDE * priv->active_port));
292 #define AUTO_SCAN_TIMEOUT 3000 /* 3 seconds */
/*
 * Scan the valid ports for up to AUTO_SCAN_TIMEOUT ms and select the
 * first one whose PHY responds (register 1 = BMSR is read per port) as
 * priv->active_port. Prints a message on timeout.
 * NOTE(review): the link-status test on the read value and the success
 * return are not visible in this listing.
 */
293 static int ca_ni_auto_scan_active_port(struct cortina_ni_priv *priv)
299 start_time = get_timer(0);
300 while (get_timer(start_time) < AUTO_SCAN_TIMEOUT) {
301 for (i = 0; i < priv->valid_port_num; i++) {
/* Skip ports with no PHY attached */
302 if (!priv->port_map[i].phy_addr)
305 ca_mdio_read(priv->port_map[i].phy_addr, 1, &data);
307 priv->active_port = priv->port_map[i].port;
313 printf("CA NI %s: auto scan active_port timeout.\n", __func__);
/*
 * Drive the per-port status LED (label "led<port>") when the Cortina
 * LED driver is enabled. @status is a LED state such as CA_LED_ON/OFF.
 * NOTE(review): led_get_by_label()'s return value is not checked here;
 * led_set_state() would operate on an undefined handle if the label is
 * missing — confirm against the full source.
 */
317 static void ca_ni_led(int port, int status)
320 struct udevice *led_dev;
322 if (IS_ENABLED(CONFIG_LED_CORTINA)) {
323 snprintf(label, sizeof(label), "led%d", port);
324 debug("%s: set port %d led %s.\n",
325 __func__, port, status ? "on" : "off");
326 led_get_by_label(label, &led_dev);
327 led_set_state(led_dev, status);
/*
 * Reset the NI block: pulse the global NI reset, wait for the init-done
 * bit, release the interface/MAC resets for the active port only, and
 * route that port's GMAC to the CPU with MIB counters enabled.
 * NOTE(review): case labels, delays and the poll-loop break are not
 * visible in this listing.
 */
331 static void ca_ni_reset(void)
334 struct NI_HV_GLB_INIT_DONE_t init_done;
335 struct NI_HV_GLB_INTF_RST_CONFIG_t intf_rst_config;
336 struct NI_HV_GLB_STATIC_CFG_t static_cfg;
337 struct GLOBAL_BLOCK_RESET_t glb_blk_reset;
338 struct cortina_ni_priv *priv = dev_get_priv(curr_dev);
340 /* NI global resets */
341 ca_reg_read(&glb_blk_reset, (u64)priv->glb_base_addr,
342 GLOBAL_BLOCK_RESET_OFFSET);
343 glb_blk_reset.reset_ni = 1;
344 ca_reg_write(&glb_blk_reset, (u64)priv->glb_base_addr,
345 GLOBAL_BLOCK_RESET_OFFSET);
/* Release the reset */
347 glb_blk_reset.reset_ni = 0;
348 ca_reg_write(&glb_blk_reset, (u64)priv->glb_base_addr,
349 GLOBAL_BLOCK_RESET_OFFSET);
351 /* check the ready bit of NI module */
352 for (i = 0; i < NI_READ_POLL_COUNT; i++) {
353 ca_reg_read(&init_done, (u64)priv->ni_hv_base_addr,
354 NI_HV_GLB_INIT_DONE_OFFSET);
355 if (init_done.ni_init_done)
/* Loop exhausted without seeing init done */
358 if (i == NI_READ_POLL_COUNT) {
359 printf("CA NI %s: NI init done not ready, init_done=0x%x!!!\n",
360 __func__, init_done.ni_init_done);
/* De-assert interface + MAC RX/TX resets for the active port */
363 ca_reg_read(&intf_rst_config, (u64)priv->ni_hv_base_addr,
364 NI_HV_GLB_INTF_RST_CONFIG_OFFSET);
365 switch (priv->active_port) {
367 intf_rst_config.intf_rst_p0 = 0;
368 intf_rst_config.mac_rx_rst_p0 = 0;
369 intf_rst_config.mac_tx_rst_p0 = 0;
372 intf_rst_config.intf_rst_p1 = 0;
373 intf_rst_config.mac_rx_rst_p1 = 0;
374 intf_rst_config.mac_tx_rst_p1 = 0;
377 intf_rst_config.intf_rst_p2 = 0;
378 intf_rst_config.mac_rx_rst_p2 = 0;
379 intf_rst_config.mac_tx_rst_p2 = 0;
382 intf_rst_config.intf_rst_p3 = 0;
383 intf_rst_config.mac_tx_rst_p3 = 0;
384 intf_rst_config.mac_rx_rst_p3 = 0;
387 intf_rst_config.intf_rst_p4 = 0;
388 intf_rst_config.mac_tx_rst_p4 = 0;
389 intf_rst_config.mac_rx_rst_p4 = 0;
393 ca_reg_write(&intf_rst_config, (u64)priv->ni_hv_base_addr,
394 NI_HV_GLB_INTF_RST_CONFIG_OFFSET);
396 /* Only one GMAC can connect to CPU */
397 ca_reg_read(&static_cfg, (u64)priv->ni_hv_base_addr,
398 NI_HV_GLB_STATIC_CFG_OFFSET);
399 static_cfg.port_to_cpu = priv->active_port;
/* Enable TX/RX MIB counter updates */
400 static_cfg.txmib_mode = 1;
401 static_cfg.rxmib_mode = 1;
403 ca_reg_write(&static_cfg, (u64)priv->ni_hv_base_addr,
404 NI_HV_GLB_STATIC_CFG_OFFSET);
/*
 * Apply the internal-GPHY calibration table from the device tree
 * (priv->gphy_values) to all four internal PHYs; each PHY's register
 * window is 0x80 bytes apart.
 * NOTE(review): local declarations (i, port, num, reg_off, value) are
 * not visible in this listing.
 */
407 static void ca_internal_gphy_cal(struct cortina_ni_priv *priv)
412 num = priv->gphy_num;
413 for (port = 0; port < 4; port++) {
414 for (i = 0; i < num; i++) {
/* Same calibration value, rebased per PHY instance */
415 reg_off = priv->gphy_values[i].reg_off + (port * 0x80);
416 value = priv->gphy_values[i].value;
417 ca_reg_write(&value, reg_off, 0);
423 static int ca_mdio_register(struct udevice *dev)
426 struct cortina_ni_priv *priv = dev_get_priv(dev);
427 struct mii_dev *mdio_bus = mdio_alloc();
432 mdio_bus->read = cortina_mdio_read;
433 mdio_bus->write = cortina_mdio_write;
434 snprintf(mdio_bus->name, sizeof(mdio_bus->name), dev->name);
436 mdio_bus->priv = (void *)priv;
438 ret = mdio_register(mdio_bus);
442 priv->mdio_bus = mdio_bus;
/*
 * Hardware setup for the RGMII port: select the 25 MHz reference clock
 * for the external switch, pulse the external reset, and set the GMAC
 * pad drive strength/mode.
 * NOTE(review): a delay between asserting and releasing ext_reset is
 * presumably present in the full source but not visible here.
 */
446 static void ca_rgmii_init(struct cortina_ni_priv *priv)
448 struct GLOBAL_GLOBAL_CONFIG_t glb_config;
449 struct GLOBAL_IO_DRIVE_CONTROL_t io_drive_control;
451 /* Generating 25Mhz reference clock for switch */
452 ca_reg_read(&glb_config, (u64)priv->glb_base_addr,
453 GLOBAL_GLOBAL_CONFIG_OFFSET);
454 glb_config.refclk_sel = 0x01;
455 glb_config.ext_reset = 0x01;
456 ca_reg_write(&glb_config, (u64)priv->glb_base_addr,
457 GLOBAL_GLOBAL_CONFIG_OFFSET);
461 /* Do external reset */
462 ca_reg_read(&glb_config, (u64)priv->glb_base_addr,
463 GLOBAL_GLOBAL_CONFIG_OFFSET);
464 glb_config.ext_reset = 0x0;
465 ca_reg_write(&glb_config, (u64)priv->glb_base_addr,
466 GLOBAL_GLOBAL_CONFIG_OFFSET);
/* Pad drive configuration for the GMAC pins */
468 ca_reg_read(&io_drive_control, (u64)priv->glb_base_addr,
469 GLOBAL_IO_DRIVE_CONTROL_OFFSET);
470 io_drive_control.gmac_mode = 2;
471 io_drive_control.gmac_dn = 1;
472 io_drive_control.gmac_dp = 1;
473 ca_reg_write(&io_drive_control, (u64)priv->glb_base_addr,
474 GLOBAL_IO_DRIVE_CONTROL_OFFSET);
/*
 * Connect the internal (port 3) and external (port 4) PHYs, pick the
 * active port — either by auto-scan (env "auto_scan_active_port") or
 * from the "active_port" environment variable — and select the
 * corresponding phydev into priv->phydev.
 * NOTE(review): several if/else openers, the buf declaration and return
 * statements are not visible in this listing.
 */
477 static int ca_phy_probe(struct udevice *dev)
479 int auto_scan_active_port = 0, tmp_port;
481 struct cortina_ni_priv *priv = dev_get_priv(dev);
482 struct phy_device *int_phydev, *ext_phydev;
484 /* Initialize internal phy device */
485 int_phydev = phy_connect(priv->mdio_bus,
486 priv->port_map[NI_PORT_3].phy_addr,
487 dev, priv->phy_interface);
489 int_phydev->supported &= PHY_GBIT_FEATURES;
490 int_phydev->advertising = int_phydev->supported;
491 phy_config(int_phydev);
493 printf("CA NI %s: There is no internal phy device\n", __func__);
496 /* Initialize external phy device */
497 ext_phydev = phy_connect(priv->mdio_bus,
498 priv->port_map[NI_PORT_4].phy_addr,
499 dev, priv->phy_interface);
501 ext_phydev->supported &= PHY_GBIT_FEATURES;
/* NOTE(review): copies int_phydev->supported — likely a copy-paste
 * bug; should presumably be ext_phydev->supported. Confirm. */
502 ext_phydev->advertising = int_phydev->supported;
503 phy_config(ext_phydev);
505 printf("CA NI %s: There is no external phy device\n", __func__);
508 /* auto scan the first link up port as active_port */
509 buf = env_get("auto_scan_active_port");
511 auto_scan_active_port = simple_strtoul(buf, NULL, 0);
512 printf("CA NI %s: auto_scan_active_port=%d\n", __func__,
513 auto_scan_active_port);
516 if (auto_scan_active_port) {
517 ca_ni_auto_scan_active_port(priv);
519 buf = env_get("active_port");
521 tmp_port = simple_strtoul(buf, NULL, 0);
/* NOTE(review): "&&" with BIT(tmp_port) is a logical AND — testing
 * port-map membership almost certainly needs bitwise "&". Confirm. */
523 !(priv->valid_port_map && BIT(tmp_port))) {
524 printf("CA NI ERROR: not support this port.");
530 priv->active_port = tmp_port;
534 printf("CA NI %s: active_port=%d\n", __func__, priv->active_port);
/* Port 4 uses the external PHY; everything else the internal one */
535 if (priv->active_port == NI_PORT_4)
536 priv->phydev = ext_phydev;
538 priv->phydev = int_phydev;
/*
 * .start op: bring up the PHY, program the XRAM RX/TX windows, select
 * the MAC interface mode (RGMII/GMII/MII by PHY speed and address),
 * mirror speed/duplex into the per-port config, toggle the XRAM
 * cpu_pkt_dis bits (required after address config changes) and enable
 * the MAC. NOTE(review): ret declaration, error-path returns and
 * if/else openers are not visible in this listing.
 */
543 static int cortina_eth_start(struct udevice *dev)
546 struct NI_HV_XRAM_CPUXRAM_ADRCFG_RX_t cpuxram_adrcfg_rx;
547 struct NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_t cpuxram_adrcfg_tx;
548 struct NI_HV_XRAM_CPUXRAM_CFG_t cpuxram_cfg;
549 struct NI_HV_PT_PORT_STATIC_CFG_t port_static_cfg;
550 struct NI_HV_PT_PORT_GLB_CFG_t port_glb_cfg;
551 struct cortina_ni_priv *priv = dev_get_priv(dev);
552 struct phy_device *phydev = priv->phydev;
/* Bring up and autonegotiate the PHY; LED off + message on failure */
554 ret = phy_startup(priv->phydev);
556 ca_ni_led(priv->active_port, CA_LED_OFF);
557 printf("CA NI Could not initialize PHY %s, active_port=%d\n",
558 priv->phydev->dev->name, priv->active_port);
562 if (!priv->phydev->link) {
563 printf("CA NI %s: link down.\n", priv->phydev->dev->name);
567 ca_ni_led(priv->active_port, CA_LED_ON);
568 printf("CA NI PHY ID 0x%08X %dMbps %s duplex\n",
569 phydev->phy_id, phydev->speed,
570 phydev->duplex == DUPLEX_HALF ? "half" : "full");
572 /* RX XRAM ADDRESS CONFIG (start and end address) */
573 memset(&cpuxram_adrcfg_rx, 0, sizeof(cpuxram_adrcfg_rx));
574 cpuxram_adrcfg_rx.rx_top_addr = RX_TOP_ADDR;
575 cpuxram_adrcfg_rx.rx_base_addr = RX_BASE_ADDR;
576 ca_reg_write(&cpuxram_adrcfg_rx, (u64)priv->ni_hv_base_addr,
577 NI_HV_XRAM_CPUXRAM_ADRCFG_RX_OFFSET);
579 /* TX XRAM ADDRESS CONFIG (start and end address) */
580 memset(&cpuxram_adrcfg_tx, 0, sizeof(cpuxram_adrcfg_tx));
581 cpuxram_adrcfg_tx.tx_top_addr = TX_TOP_ADDR;
582 cpuxram_adrcfg_tx.tx_base_addr = TX_BASE_ADDR;
583 ca_reg_write(&cpuxram_adrcfg_tx, (u64)priv->ni_hv_base_addr,
584 NI_HV_XRAM_CPUXRAM_ADRCFG_TX_0_OFFSET);
587 * Configuration for Management Ethernet Interface:
588 * - RGMII 1000 mode or RGMII 100 mode
591 ca_reg_read(&port_static_cfg, (u64)priv->ni_hv_base_addr,
592 NI_HV_PT_PORT_STATIC_CFG_OFFSET +
593 (APB0_NI_HV_PT_STRIDE * priv->active_port));
594 if (phydev->speed == SPEED_1000) {
595 /* port 4 connects to RGMII PHY */
596 if (phydev->addr == 5)
597 port_static_cfg.int_cfg = GE_MAC_INTF_RGMII_1000;
599 port_static_cfg.int_cfg = GE_MAC_INTF_GMII;
601 /* port 4 connects to RGMII PHY */
602 if (phydev->addr == 5)
603 port_static_cfg.int_cfg = GE_MAC_INTF_RGMII_100;
605 port_static_cfg.int_cfg = GE_MAC_INTF_MII;
608 ca_reg_write(&port_static_cfg, (u64)priv->ni_hv_base_addr,
609 NI_HV_PT_PORT_STATIC_CFG_OFFSET +
610 (APB0_NI_HV_PT_STRIDE * priv->active_port));
/* Mirror negotiated speed/duplex into the per-port global config */
612 ca_reg_read(&port_glb_cfg, (u64)priv->ni_hv_base_addr,
613 NI_HV_PT_PORT_GLB_CFG_OFFSET +
614 (APB0_NI_HV_PT_STRIDE * priv->active_port));
615 port_glb_cfg.speed = phydev->speed == SPEED_10 ? 1 : 0;
616 port_glb_cfg.duplex = phydev->duplex == DUPLEX_HALF ? 1 : 0;
617 ca_reg_write(&port_glb_cfg, (u64)priv->ni_hv_base_addr,
618 NI_HV_PT_PORT_GLB_CFG_OFFSET +
619 (APB0_NI_HV_PT_STRIDE * priv->active_port));
621 /* Need to toggle the tx and rx cpu_pkt_dis bit */
622 /* after changing Address config register. */
623 ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
624 NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
625 cpuxram_cfg.rx_0_cpu_pkt_dis = 1;
626 cpuxram_cfg.tx_0_cpu_pkt_dis = 1;
627 ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
628 NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
630 ca_reg_read(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
631 NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
632 cpuxram_cfg.rx_0_cpu_pkt_dis = 0;
633 cpuxram_cfg.tx_0_cpu_pkt_dis = 0;
634 ca_reg_write(&cpuxram_cfg, (u64)priv->ni_hv_base_addr,
635 NI_HV_XRAM_CPUXRAM_CFG_OFFSET);
637 ca_ni_enable_tx_rx();
642 /*********************************************
643 * Packet receive routine from Management FE
644 * Expects a previously allocated buffer and
646 * Returns 0 on success, -1 on failure
647 *******************************************/
/*
 * .recv op: drain the RX XRAM ring. For each descriptor between the
 * software read pointer and the hardware write pointer: parse Header
 * XR, validate the packet status word, copy the payload into a
 * net_rx_packets[] buffer, hand it to the network stack and advance
 * the software read pointer (written as next_link).
 * NOTE(review): several continue/break statements, closing braces and
 * the final return are not visible in this listing.
 */
648 static int cortina_eth_recv(struct udevice *dev, int flags, uchar **packetp)
651 u32 next_link, pktlen = 0;
652 u32 sw_rx_rd_ptr, hw_rx_wr_ptr, *rx_xram_ptr, *data_ptr;
653 int loop, index = 0, blk_num;
654 struct cortina_ni_priv *priv = dev_get_priv(dev);
655 struct NI_HEADER_X_T header_x;
656 struct NI_PACKET_STATUS packet_status;
657 struct NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_t cpuxram_cpu_sta_rx;
658 struct NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_t cpuxram_cpu_cfg_rx;
660 /* get the hw write pointer */
661 memset(&cpuxram_cpu_sta_rx, 0, sizeof(cpuxram_cpu_sta_rx));
662 ca_reg_read(&cpuxram_cpu_sta_rx, (u64)priv->ni_hv_base_addr,
663 NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
664 hw_rx_wr_ptr = cpuxram_cpu_sta_rx.pkt_wr_ptr;
666 /* get the sw read pointer */
667 memset(&cpuxram_cpu_cfg_rx, 0, sizeof(cpuxram_cpu_cfg_rx));
668 ca_reg_read(&cpuxram_cpu_cfg_rx, (u64)priv->ni_hv_base_addr,
669 NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
670 sw_rx_rd_ptr = cpuxram_cpu_cfg_rx.pkt_rd_ptr;
672 debug("%s: NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0 = 0x%p, ", __func__,
673 priv->ni_hv_base_addr + NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
674 debug("NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0 = 0x%p\n",
675 priv->ni_hv_base_addr + NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
676 debug("%s : RX hw_wr_ptr = %d, sw_rd_ptr = %d\n",
677 __func__, hw_rx_wr_ptr, sw_rx_rd_ptr);
/* Ring is non-empty while the pointers differ */
679 while (sw_rx_rd_ptr != hw_rx_wr_ptr) {
680 /* Point to the absolute memory address of XRAM
681 * where read pointer is
683 rx_xram_ptr = (u32 *)
684 ((unsigned long)priv->ni_xram_base
687 /* Wrap around if required */
688 if (rx_xram_ptr >= (u32 *)(unsigned long)priv->rx_xram_end_adr)
689 rx_xram_ptr = (u32 *)
690 (unsigned long)priv->rx_xram_base_adr;
692 /* Checking header XR. Do not update the read pointer yet */
693 /* skip unused 32-bit in Header XR */
694 rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
695 priv->rx_xram_base_adr,
696 priv->rx_xram_end_adr);
698 memcpy(&header_x, rx_xram_ptr, sizeof(header_x));
699 next_link = header_x.next_link;
700 /* Header XR [31:0] */
/* All-ones header word indicates a corrupted XRAM entry */
702 if (*rx_xram_ptr == 0xffffffff)
703 printf("CA NI %s: XRAM Error !\n", __func__);
705 debug("%s : RX next link 0x%x\n", __func__, next_link);
706 debug("%s : bytes_valid %x\n", __func__, header_x.bytes_valid);
/* ownership == 0: descriptor belongs to software (ready to read) */
708 if (header_x.ownership == 0) {
709 /* point to Packet status [31:0] */
710 rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
711 priv->rx_xram_base_adr,
712 priv->rx_xram_end_adr);
714 memcpy(&packet_status, rx_xram_ptr,
715 sizeof(*rx_xram_ptr));
716 if (packet_status.valid == 0) {
717 debug("%s: Invalid Packet !!, ", __func__);
718 debug("next_link=%d\n", next_link);
720 /* Update the software read pointer */
721 ca_reg_write(&next_link,
722 (u64)priv->ni_hv_base_addr,
723 NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
/* Drop any packet the MAC flagged as bad */
727 if (packet_status.drop ||
728 packet_status.runt ||
729 packet_status.oversize ||
730 packet_status.jabber ||
731 packet_status.crc_error ||
732 packet_status.jumbo) {
733 debug("%s: Error Packet!!, ", __func__);
734 debug("next_link=%d\n", next_link);
736 /* Update the software read pointer */
737 ca_reg_write(&next_link,
738 (u64)priv->ni_hv_base_addr,
739 NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
/* NOTE(review): comment says 1514 but the code tests 1518
 * (max frame incl. CRC) — reconcile in the full source. */
743 /* check whether packet size is larger than 1514 */
744 if (packet_status.packet_size > 1518) {
745 debug("%s: Error Packet !! Packet size=%d, ",
746 __func__, packet_status.packet_size);
747 debug("larger than 1518, next_link=%d\n",
750 /* Update the software read pointer */
751 ca_reg_write(&next_link,
752 (u64)priv->ni_hv_base_addr,
753 NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
757 rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
758 priv->rx_xram_base_adr,
759 priv->rx_xram_end_adr);
761 pktlen = packet_status.packet_size;
763 debug("%s : rx packet length = %d\n",
764 __func__, packet_status.packet_size);
766 rx_xram_ptr = ca_rdwrptr_adv_one(rx_xram_ptr,
767 priv->rx_xram_base_adr,
768 priv->rx_xram_end_adr);
770 data_ptr = (u32 *)net_rx_packets[index];
772 /* Read out the packet */
773 /* Data is in little endian form in the XRAM */
775 /* Send the packet to upper layer */
777 debug("%s: packet data[]=", __func__);
/* Word-wise copy out of XRAM with wrap-around */
779 for (loop = 0; loop <= pktlen / 4; loop++) {
780 ptr = (u8 *)rx_xram_ptr;
782 debug("[0x%x]-[0x%x]-[0x%x]-[0x%x]",
783 ptr[0], ptr[1], ptr[2], ptr[3]);
784 *data_ptr++ = *rx_xram_ptr++;
785 /* Wrap around if required */
786 if (rx_xram_ptr >= (u32 *)
787 (unsigned long)priv->rx_xram_end_adr) {
788 rx_xram_ptr = (u32 *)(unsigned long)
789 (priv->rx_xram_base_adr);
794 net_process_received_packet(net_rx_packets[index],
796 if (++index >= PKTBUFSRX)
/* NOTE(review): "* 255" looks like it should be "* 256"
 * (high byte shift) for the TFTP block number; debug only. */
798 blk_num = net_rx_packets[index][0x2c] * 255 +
799 net_rx_packets[index][0x2d];
800 debug("%s: tftp block number=%d\n", __func__, blk_num);
802 /* Update the software read pointer */
803 ca_reg_write(&next_link,
804 (u64)priv->ni_hv_base_addr,
805 NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
808 /* get the hw write pointer */
809 ca_reg_read(&cpuxram_cpu_sta_rx, (u64)priv->ni_hv_base_addr,
810 NI_HV_XRAM_CPUXRAM_CPU_STA_RX_0_OFFSET);
811 hw_rx_wr_ptr = cpuxram_cpu_sta_rx.pkt_wr_ptr;
813 /* get the sw read pointer */
814 ca_reg_read(&sw_rx_rd_ptr, (u64)priv->ni_hv_base_addr,
815 NI_HV_XRAM_CPUXRAM_CPU_CFG_RX_0_OFFSET);
/*
 * .send op: stage the frame in a static bounce buffer behind an 8-byte
 * header_A, pad to minimum Ethernet size, 8-byte-align packet+CRC,
 * append a software CRC32, write Header XT plus the payload into the
 * TX XRAM ring (word-wise, with wrap), and publish the new software
 * write pointer. Rejects NULL packets and lengths > 2032.
 * NOTE(review): pad/next_link declarations, early returns and some
 * closing braces are not visible in this listing.
 */
820 static int cortina_eth_send(struct udevice *dev, void *packet, int length)
822 u32 hw_tx_rd_ptr = 0, sw_tx_wr_ptr = 0;
823 u32 loop, new_pkt_len, ca_crc32;
824 u32 *tx_xram_ptr, *data_ptr;
826 u8 *ptr, *pkt_buf_ptr, valid_bytes = 0;
828 static u8 pkt_buf[2048];
829 struct NI_HEADER_X_T hdr_xt;
830 struct NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_t cpuxram_cpu_cfg_tx;
831 struct cortina_ni_priv *priv = dev_get_priv(dev);
833 if (!packet || length > 2032)
836 /* Get the hardware read pointer */
837 ca_reg_read(&hw_tx_rd_ptr, (u64)priv->ni_hv_base_addr,
838 NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0_OFFSET);
840 /* Get the software write pointer */
841 ca_reg_read(&sw_tx_wr_ptr, (u64)priv->ni_hv_base_addr,
842 NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET);
844 debug("%s: NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0=0x%p, ",
846 KSEG1_ATU_XLAT(priv->ni_hv_base_addr +
847 NI_HV_XRAM_CPUXRAM_CPU_STAT_TX_0_OFFSET));
848 debug("NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0=0x%p\n",
849 KSEG1_ATU_XLAT(priv->ni_hv_base_addr +
850 NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET));
851 debug("%s : hw_tx_rd_ptr = %d\n", __func__, hw_tx_rd_ptr);
852 debug("%s : sw_tx_wr_ptr = %d\n", __func__, sw_tx_wr_ptr);
/* Previous frame not yet consumed by hardware */
854 if (hw_tx_rd_ptr != sw_tx_wr_ptr) {
855 printf("CA NI %s: Tx FIFO is not available!\n", __func__);
859 /* a workaround on 2015/10/01
860 * the packet size+CRC should be 8-byte alignment
862 if (((length + 4) % 8) != 0)
863 length += (8 - ((length + 4) % 8));
865 memset(pkt_buf, 0x00, sizeof(pkt_buf));
867 /* add 8-byte header_A at the beginning of packet */
868 memcpy(&pkt_buf[HEADER_A_SIZE], (const void *)packet, length);
870 pad = 64 - (length + 4); /* if packet length < 60 */
871 pad = (pad < 0) ? 0 : pad;
873 debug("%s: length=%d, pad=%d\n", __func__, length, pad);
875 new_pkt_len = length + pad; /* new packet length */
877 pkt_buf_ptr = (u8 *)pkt_buf;
879 /* Calculate the CRC32, skip 8-byte header_A */
880 ca_crc32 = crc32(0, (u8 *)(pkt_buf_ptr + HEADER_A_SIZE), new_pkt_len);
882 debug("%s: crc32 is 0x%x\n", __func__, ca_crc32);
883 debug("%s: ~crc32 is 0x%x\n", __func__, ~ca_crc32);
884 debug("%s: pkt len %d\n", __func__, new_pkt_len);
885 /* should add 8-byte header_A */
886 /* CRC will be re-calculated by hardware */
887 memcpy((pkt_buf_ptr + new_pkt_len + HEADER_A_SIZE),
888 (u8 *)(&ca_crc32), sizeof(ca_crc32));
889 new_pkt_len = new_pkt_len + 4; /* add CRC */
891 valid_bytes = new_pkt_len % 8;
/* NOTE(review): this ternary is a no-op (x ? x : 0 == x) */
892 valid_bytes = valid_bytes ? valid_bytes : 0;
893 debug("%s: valid_bytes %d\n", __func__, valid_bytes);
895 /* should add 8-byte headerA */
896 next_link = sw_tx_wr_ptr +
897 (new_pkt_len + 7 + HEADER_A_SIZE) / 8; /* for header XT */
899 next_link = next_link + 1;
900 /* Wrap around if required */
901 if (next_link > priv->tx_xram_end) {
902 next_link = priv->tx_xram_start +
903 (next_link - (priv->tx_xram_end + 1));
906 debug("%s: TX next_link %x\n", __func__, next_link);
907 memset(&hdr_xt, 0, sizeof(hdr_xt));
908 hdr_xt.ownership = 1;
909 hdr_xt.bytes_valid = valid_bytes;
910 hdr_xt.next_link = next_link;
/* Locate the current write slot in TX XRAM */
912 tx_xram_ptr = (u32 *)((unsigned long)priv->ni_xram_base
915 /* Wrap around if required */
916 if (tx_xram_ptr >= (u32 *)(unsigned long)priv->tx_xram_end_adr)
917 tx_xram_ptr = (u32 *)(unsigned long)priv->tx_xram_base_adr;
919 tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
920 priv->tx_xram_base_adr,
921 priv->tx_xram_end_adr);
/* Write Header XT (one 32-bit word) ahead of the payload */
923 memcpy(tx_xram_ptr, &hdr_xt, sizeof(*tx_xram_ptr));
925 tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
926 priv->tx_xram_base_adr,
927 priv->tx_xram_end_adr);
929 /* Now to copy the data. The first byte on the line goes first */
930 data_ptr = (u32 *)pkt_buf_ptr;
931 debug("%s: packet data[]=", __func__);
933 /* copy header_A to XRAM */
934 for (loop = 0; loop <= (new_pkt_len + HEADER_A_SIZE) / 4; loop++) {
935 ptr = (u8 *)data_ptr;
938 debug("[0x%x]-[0x%x]-[0x%x]-[0x%x]-",
939 ptr[0], ptr[1], ptr[2], ptr[3]);
941 *tx_xram_ptr = *data_ptr++;
942 tx_xram_ptr = ca_rdwrptr_adv_one(tx_xram_ptr,
943 priv->tx_xram_base_adr,
944 priv->tx_xram_end_adr);
948 /* Publish the software write pointer */
/* NOTE(review): only pkt_wr_ptr is assigned — the rest of
 * cpuxram_cpu_cfg_tx is written uninitialized unless a memset
 * exists in lines missing from this listing; confirm. */
949 cpuxram_cpu_cfg_tx.pkt_wr_ptr = next_link;
950 ca_reg_write(&cpuxram_cpu_cfg_tx,
951 (u64)priv->ni_hv_base_addr,
952 NI_HV_XRAM_CPUXRAM_CPU_CFG_TX_0_OFFSET);
/*
 * cortina_eth_stop() - DM .stop op; intentionally a no-op.
 * @netdev: the NI ethernet udevice (unused)
 */
static void cortina_eth_stop(struct udevice *netdev)
{
	/* Nothing to do for now. */
}
/*
 * DM probe: derive XRAM window addresses from ni_xram_base (8 bytes
 * per XRAM line), register the MDIO bus, set the MDIO pre-scale,
 * select the active PHY, program the MAC address and optionally run
 * RGMII setup and internal-GPHY calibration.
 * NOTE(review): ret/reg_value declarations, error handling after
 * ca_mdio_register(), the ca_ni_reset()/ca_phy_probe() calls and the
 * final return are not visible in this listing. The "®_value" tokens
 * below appear to be mojibake for "&reg_value" — confirm and repair in
 * the full source.
 */
962 static int cortina_eth_probe(struct udevice *dev)
965 struct cortina_ni_priv *priv;
967 priv = dev_get_priv(dev);
968 priv->rx_xram_base_adr = priv->ni_xram_base + (RX_BASE_ADDR * 8);
969 priv->rx_xram_end_adr = priv->ni_xram_base + ((RX_TOP_ADDR + 1) * 8);
970 priv->rx_xram_start = RX_BASE_ADDR;
971 priv->rx_xram_end = RX_TOP_ADDR;
972 priv->tx_xram_base_adr = priv->ni_xram_base + (TX_BASE_ADDR * 8);
973 priv->tx_xram_end_adr = priv->ni_xram_base + ((TX_TOP_ADDR + 1) * 8);
974 priv->tx_xram_start = TX_BASE_ADDR;
975 priv->tx_xram_end = TX_TOP_ADDR;
978 debug("%s: rx_base_addr:%x\t rx_top_addr %x\n",
979 __func__, priv->rx_xram_start, priv->rx_xram_end);
980 debug("%s: tx_base_addr:%x\t tx_top_addr %x\n",
981 __func__, priv->tx_xram_start, priv->tx_xram_end);
982 debug("%s: rx physical start address = %x end address = %x\n",
983 __func__, priv->rx_xram_base_adr, priv->rx_xram_end_adr);
984 debug("%s: tx physical start address = %x end address = %x\n",
985 __func__, priv->tx_xram_base_adr, priv->tx_xram_end_adr);
988 ret = ca_mdio_register(dev);
992 /* set MDIO pre-scale value */
993 ca_reg_read(®_value, (u64)priv->per_mdio_base_addr,
994 PER_MDIO_CFG_OFFSET);
995 reg_value = reg_value | 0x00280000;
996 ca_reg_write(®_value, (u64)priv->per_mdio_base_addr,
997 PER_MDIO_CFG_OFFSET);
/* Bind the PHY address of the selected port */
1000 priv->phydev->addr = priv->port_map[priv->active_port].phy_addr;
1002 ca_ni_led(priv->active_port, CA_LED_ON);
1006 printf("CA NI %s: active_port=%d, phy_addr=%d\n",
1007 __func__, priv->active_port, priv->phydev->addr);
1008 printf("CA NI %s: phy_id=0x%x, phy_id & PHY_ID_MASK=0x%x\n", __func__,
1009 priv->phydev->phy_id, priv->phydev->phy_id & 0xFFFFFFF0);
1011 /* parsing ethaddr and set to NI registers. */
1012 ca_ni_setup_mac_addr();
1014 #ifdef MIIPHY_REGISTER
1015 /* the phy_read and phy_write
1016 * should meet the proto type of miiphy_register
1018 miiphy_register(dev->name, ca_miiphy_read, ca_miiphy_write);
1021 if (priv->init_rgmii) {
1022 /* hardware settings for RGMII port */
1023 ca_rgmii_init(priv);
1026 if (priv->gphy_num > 0) {
1027 /* do internal gphy calibration */
1028 ca_internal_gphy_cal(priv);
/*
 * DM .of_to_plat: remap the three register regions (global, PER_MDIO,
 * NI_HV) and read port map, internal-GPHY calibration table and
 * miscellaneous configuration from the device tree into priv.
 * NOTE(review): i/ret declarations, the -ENODEV returns after the base
 * address checks, loop closers and the final return are not visible in
 * this listing; the ret values from dev_read_u32_index() appear to be
 * assigned but never checked.
 */
1033 static int ca_ni_of_to_plat(struct udevice *dev)
1036 struct cortina_ni_priv *priv = dev_get_priv(dev);
1038 memset(priv, 0, sizeof(struct cortina_ni_priv));
1039 priv->glb_base_addr = dev_remap_addr_index(dev, 0);
1040 if (!priv->glb_base_addr)
1042 printf("CA NI %s: priv->glb_base_addr for index 0 is 0x%p\n",
1043 __func__, priv->glb_base_addr);
1045 priv->per_mdio_base_addr = dev_remap_addr_index(dev, 1);
1046 if (!priv->per_mdio_base_addr)
1048 printf("CA NI %s: priv->per_mdio_base_addr for index 1 is 0x%p\n",
1049 __func__, priv->per_mdio_base_addr);
1051 priv->ni_hv_base_addr = dev_remap_addr_index(dev, 2);
1052 if (!priv->ni_hv_base_addr)
1054 printf("CA NI %s: priv->ni_hv_base_addr for index 2 is 0x%p\n",
1055 __func__, priv->ni_hv_base_addr);
1057 priv->valid_port_map = dev_read_u32_default(dev, "valid-port-map", 1);
1058 priv->valid_port_num = dev_read_u32_default(dev, "valid-port-num", 1);
/* "valid-ports" is a flat list of (phy_addr, port) pairs */
1060 for (i = 0; i < priv->valid_port_num; i++) {
1061 ret = dev_read_u32_index(dev, "valid-ports", i * 2,
1062 &priv->port_map[i].phy_addr);
1063 ret = dev_read_u32_index(dev, "valid-ports", (i * 2) + 1,
1064 &priv->port_map[i].port);
/* "inter-gphy-val" is a flat list of (reg_off, value) pairs */
1067 priv->gphy_num = dev_read_u32_default(dev, "inter-gphy-num", 1);
1068 for (i = 0; i < priv->gphy_num; i++) {
1069 ret = dev_read_u32_index(dev, "inter-gphy-val", i * 2,
1070 &priv->gphy_values[i].reg_off);
1071 ret = dev_read_u32_index(dev, "inter-gphy-val", (i * 2) + 1,
1072 &priv->gphy_values[i].value);
1075 priv->active_port = dev_read_u32_default(dev, "def-active-port", 1);
1076 priv->init_rgmii = dev_read_u32_default(dev, "init-rgmii", 1);
1077 priv->ni_xram_base = dev_read_u32_default(dev, "ni-xram-base", 1);
/* DM ethernet operations exposed by this driver */
1081 static const struct eth_ops cortina_eth_ops = {
1082 .start = cortina_eth_start,
1083 .send = cortina_eth_send,
1084 .recv = cortina_eth_recv,
1085 .stop = cortina_eth_stop,
/* Device-tree match table */
1088 static const struct udevice_id cortina_eth_ids[] = {
1089 { .compatible = "eth_cortina" },
/* Driver binding: priv is cortina_ni_priv, platdata is eth_pdata */
1093 U_BOOT_DRIVER(eth_cortina) = {
1094 .name = "eth_cortina",
1096 .of_match = cortina_eth_ids,
1097 .probe = cortina_eth_probe,
1098 .ops = &cortina_eth_ops,
1099 .priv_auto = sizeof(struct cortina_ni_priv),
1100 .plat_auto = sizeof(struct eth_pdata),
1101 .of_to_plat = ca_ni_of_to_plat,