1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright 2014-2017 Broadcom.
9 #include <linux/printk.h>
18 #include <asm/cache.h>
21 #include <linux/delay.h>
22 #include <linux/bitops.h>
24 #include "bcm-sf2-eth.h"
25 #include "bcm-sf2-eth-gmac.h"
/*
 * Busy-wait until the condition @exp becomes false or roughly @us
 * microseconds have elapsed (polled in 10 us steps).
 *
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and stays safe inside unbraced if/else bodies.
 */
#define SPINWAIT(exp, us) do { \
	uint countdown = (us) + 9; \
	while ((exp) && (countdown >= 10)) { \
		udelay(10); \
		countdown -= 10; \
	} \
} while (0)
35 #define RX_BUF_SIZE_ALIGNED ALIGN(RX_BUF_SIZE, ARCH_DMA_MINALIGN)
36 #define TX_BUF_SIZE_ALIGNED ALIGN(TX_BUF_SIZE, ARCH_DMA_MINALIGN)
37 #define DESCP_SIZE_ALIGNED ALIGN(sizeof(dma64dd_t), ARCH_DMA_MINALIGN)
39 static int gmac_disable_dma(struct eth_dma *dma, int dir);
40 static int gmac_enable_dma(struct eth_dma *dma, int dir);
44 /* misc control bits */
46 /* buffer count and address extension */
48 /* memory address of the date buffer, bits 31:0 */
50 /* memory address of the date buffer, bits 63:32 */
54 uint32_t g_dmactrlflags;
56 static uint32_t dma_ctrlflags(uint32_t mask, uint32_t flags)
58 debug("%s enter\n", __func__);
60 g_dmactrlflags &= ~mask;
61 g_dmactrlflags |= flags;
63 /* If trying to enable parity, check if parity is actually supported */
64 if (g_dmactrlflags & DMA_CTRL_PEN) {
67 control = readl(GMAC0_DMA_TX_CTRL_ADDR);
68 writel(control | D64_XC_PD, GMAC0_DMA_TX_CTRL_ADDR);
69 if (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_PD) {
71 * We *can* disable it, therefore it is supported;
72 * restore control register
74 writel(control, GMAC0_DMA_TX_CTRL_ADDR);
76 /* Not supported, don't allow it to be enabled */
77 g_dmactrlflags &= ~DMA_CTRL_PEN;
81 return g_dmactrlflags;
84 static inline void reg32_clear_bits(uint32_t reg, uint32_t value)
86 uint32_t v = readl(reg);
91 static inline void reg32_set_bits(uint32_t reg, uint32_t value)
93 uint32_t v = readl(reg);
99 static void dma_tx_dump(struct eth_dma *dma)
101 dma64dd_t *descp = NULL;
105 printf("TX DMA Register:\n");
106 printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
107 readl(GMAC0_DMA_TX_CTRL_ADDR),
108 readl(GMAC0_DMA_TX_PTR_ADDR),
109 readl(GMAC0_DMA_TX_ADDR_LOW_ADDR),
110 readl(GMAC0_DMA_TX_ADDR_HIGH_ADDR),
111 readl(GMAC0_DMA_TX_STATUS0_ADDR),
112 readl(GMAC0_DMA_TX_STATUS1_ADDR));
114 printf("TX Descriptors:\n");
115 for (i = 0; i < TX_BUF_NUM; i++) {
116 descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
117 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
118 descp->ctrl1, descp->ctrl2,
119 descp->addrhigh, descp->addrlow);
122 printf("TX Buffers:\n");
123 /* Initialize TX DMA descriptor table */
124 for (i = 0; i < TX_BUF_NUM; i++) {
125 bufp = (uint8_t *)(dma->tx_buf + i * TX_BUF_SIZE_ALIGNED);
126 printf("buf%d:0x%x; ", i, (uint32_t)bufp);
131 static void dma_rx_dump(struct eth_dma *dma)
133 dma64dd_t *descp = NULL;
137 printf("RX DMA Register:\n");
138 printf("control:0x%x; ptr:0x%x; addrl:0x%x; addrh:0x%x; stat0:0x%x, stat1:0x%x\n",
139 readl(GMAC0_DMA_RX_CTRL_ADDR),
140 readl(GMAC0_DMA_RX_PTR_ADDR),
141 readl(GMAC0_DMA_RX_ADDR_LOW_ADDR),
142 readl(GMAC0_DMA_RX_ADDR_HIGH_ADDR),
143 readl(GMAC0_DMA_RX_STATUS0_ADDR),
144 readl(GMAC0_DMA_RX_STATUS1_ADDR));
146 printf("RX Descriptors:\n");
147 for (i = 0; i < RX_BUF_NUM; i++) {
148 descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
149 printf("ctrl1:0x%08x; ctrl2:0x%08x; addr:0x%x 0x%08x\n",
150 descp->ctrl1, descp->ctrl2,
151 descp->addrhigh, descp->addrlow);
154 printf("RX Buffers:\n");
155 for (i = 0; i < RX_BUF_NUM; i++) {
156 bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
157 printf("buf%d:0x%x; ", i, (uint32_t)bufp);
163 static int dma_tx_init(struct eth_dma *dma)
165 dma64dd_t *descp = NULL;
170 debug("%s enter\n", __func__);
172 /* clear descriptor memory */
173 memset((void *)(dma->tx_desc_aligned), 0,
174 TX_BUF_NUM * DESCP_SIZE_ALIGNED);
175 memset(dma->tx_buf, 0, TX_BUF_NUM * TX_BUF_SIZE_ALIGNED);
177 /* Initialize TX DMA descriptor table */
178 for (i = 0; i < TX_BUF_NUM; i++) {
179 descp = (dma64dd_t *)(dma->tx_desc_aligned) + i;
180 bufp = dma->tx_buf + i * TX_BUF_SIZE_ALIGNED;
181 /* clear buffer memory */
182 memset((void *)bufp, 0, TX_BUF_SIZE_ALIGNED);
185 /* if last descr set endOfTable */
186 if (i == (TX_BUF_NUM-1))
187 ctrl = D64_CTRL1_EOT;
190 descp->addrlow = (uint32_t)bufp;
194 /* flush descriptor and buffer */
195 descp = dma->tx_desc_aligned;
197 flush_dcache_range((unsigned long)descp,
198 (unsigned long)descp +
199 DESCP_SIZE_ALIGNED * TX_BUF_NUM);
200 flush_dcache_range((unsigned long)bufp,
201 (unsigned long)bufp +
202 TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
204 /* initialize the DMA channel */
205 writel((uint32_t)(dma->tx_desc_aligned), GMAC0_DMA_TX_ADDR_LOW_ADDR);
206 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
208 /* now update the dma last descriptor */
209 writel(((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK,
210 GMAC0_DMA_TX_PTR_ADDR);
215 static int dma_rx_init(struct eth_dma *dma)
218 dma64dd_t *descp = NULL;
223 debug("%s enter\n", __func__);
225 /* clear descriptor memory */
226 memset((void *)(dma->rx_desc_aligned), 0,
227 RX_BUF_NUM * DESCP_SIZE_ALIGNED);
228 /* clear buffer memory */
229 memset(dma->rx_buf, 0, RX_BUF_NUM * RX_BUF_SIZE_ALIGNED);
231 /* Initialize RX DMA descriptor table */
232 for (i = 0; i < RX_BUF_NUM; i++) {
233 descp = (dma64dd_t *)(dma->rx_desc_aligned) + i;
234 bufp = dma->rx_buf + i * RX_BUF_SIZE_ALIGNED;
236 /* if last descr set endOfTable */
237 if (i == (RX_BUF_NUM - 1))
238 ctrl = D64_CTRL1_EOT;
240 descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
241 descp->addrlow = (uint32_t)bufp;
244 last_desc = ((uint32_t)(descp) & D64_XP_LD_MASK)
248 descp = dma->rx_desc_aligned;
250 /* flush descriptor and buffer */
251 flush_dcache_range((unsigned long)descp,
252 (unsigned long)descp +
253 DESCP_SIZE_ALIGNED * RX_BUF_NUM);
254 flush_dcache_range((unsigned long)(bufp),
255 (unsigned long)bufp +
256 RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
258 /* initailize the DMA channel */
259 writel((uint32_t)descp, GMAC0_DMA_RX_ADDR_LOW_ADDR);
260 writel(0, GMAC0_DMA_RX_ADDR_HIGH_ADDR);
262 /* now update the dma last descriptor */
263 writel(last_desc, GMAC0_DMA_RX_PTR_ADDR);
268 static int dma_init(struct eth_dma *dma)
270 debug(" %s enter\n", __func__);
273 * Default flags: For backwards compatibility both
274 * Rx Overflow Continue and Parity are DISABLED.
276 dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
278 debug("rx burst len 0x%x\n",
279 (readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK)
281 debug("tx burst len 0x%x\n",
282 (readl(GMAC0_DMA_TX_CTRL_ADDR) & D64_XC_BL_MASK)
288 /* From end of chip_init() */
289 /* enable the overflow continue feature and disable parity */
290 dma_ctrlflags(DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */,
291 DMA_CTRL_ROC /* value */);
296 static int dma_deinit(struct eth_dma *dma)
298 debug(" %s enter\n", __func__);
300 gmac_disable_dma(dma, MAC_DMA_RX);
301 gmac_disable_dma(dma, MAC_DMA_TX);
305 free(dma->tx_desc_aligned);
306 dma->tx_desc_aligned = NULL;
310 free(dma->rx_desc_aligned);
311 dma->rx_desc_aligned = NULL;
316 int gmac_tx_packet(struct eth_dma *dma, void *packet, int length)
318 uint8_t *bufp = dma->tx_buf + dma->cur_tx_index * TX_BUF_SIZE_ALIGNED;
320 /* kick off the dma */
322 int txout = dma->cur_tx_index;
324 dma64dd_t *descp = NULL;
326 uint32_t last_desc = (((uint32_t)dma->tx_desc_aligned) +
327 sizeof(dma64dd_t)) & D64_XP_LD_MASK;
330 debug("%s enter\n", __func__);
332 /* load the buffer */
333 memcpy(bufp, packet, len);
335 /* Add 4 bytes for Ethernet FCS/CRC */
338 ctrl = (buflen & D64_CTRL2_BC_MASK);
340 /* the transmit will only be one frame or set SOF, EOF */
341 /* also set int on completion */
342 flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
344 /* txout points to the descriptor to uset */
345 /* if last descriptor then set EOT */
346 if (txout == (TX_BUF_NUM - 1)) {
347 flags |= D64_CTRL1_EOT;
348 last_desc = ((uint32_t)(dma->tx_desc_aligned)) & D64_XP_LD_MASK;
351 /* write the descriptor */
352 descp = ((dma64dd_t *)(dma->tx_desc_aligned)) + txout;
353 descp->addrlow = (uint32_t)bufp;
355 descp->ctrl1 = flags;
358 /* flush descriptor and buffer */
359 flush_dcache_range((unsigned long)dma->tx_desc_aligned,
360 (unsigned long)dma->tx_desc_aligned +
361 DESCP_SIZE_ALIGNED * TX_BUF_NUM);
362 flush_dcache_range((unsigned long)bufp,
363 (unsigned long)bufp + TX_BUF_SIZE_ALIGNED);
365 /* now update the dma last descriptor */
366 writel(last_desc, GMAC0_DMA_TX_PTR_ADDR);
368 /* tx dma should be enabled so packet should go out */
371 dma->cur_tx_index = (txout + 1) & (TX_BUF_NUM - 1);
376 bool gmac_check_tx_done(struct eth_dma *dma)
378 /* wait for tx to complete */
380 bool xfrdone = false;
382 debug("%s enter\n", __func__);
384 intstatus = readl(GMAC0_INT_STATUS_ADDR);
386 debug("int(0x%x)\n", intstatus);
387 if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) {
389 /* clear the int bits */
390 intstatus &= ~(I_XI0 | I_XI1 | I_XI2 | I_XI3);
391 writel(intstatus, GMAC0_INT_STATUS_ADDR);
393 debug("Tx int(0x%x)\n", intstatus);
399 int gmac_check_rx_done(struct eth_dma *dma, uint8_t *buf)
402 size_t rcvlen = 0, buflen = 0;
403 uint32_t stat0 = 0, stat1 = 0;
404 uint32_t control, offset;
405 uint8_t statbuf[HWRXOFF*2];
407 int index, curr, active;
408 dma64dd_t *descp = NULL;
413 * this api will check if a packet has been received.
414 * If so it will return the address of the buffer and current
415 * descriptor index will be incremented to the
416 * next descriptor. Once done with the frame the buffer should be
417 * added back onto the descriptor and the lastdscr should be updated
418 * to this descriptor.
420 index = dma->cur_rx_index;
421 offset = (uint32_t)(dma->rx_desc_aligned);
422 stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR) & D64_RS0_CD_MASK;
423 stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR) & D64_RS0_CD_MASK;
424 curr = ((stat0 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
425 active = ((stat1 - offset) & D64_RS0_CD_MASK) / sizeof(dma64dd_t);
427 /* check if any frame */
431 debug("received packet\n");
432 debug("expect(0x%x) curr(0x%x) active(0x%x)\n", index, curr, active);
437 /* get the packet pointer that corresponds to the rx descriptor */
438 bufp = dma->rx_buf + index * RX_BUF_SIZE_ALIGNED;
440 descp = (dma64dd_t *)(dma->rx_desc_aligned) + index;
441 /* flush descriptor and buffer */
442 flush_dcache_range((unsigned long)dma->rx_desc_aligned,
443 (unsigned long)dma->rx_desc_aligned +
444 DESCP_SIZE_ALIGNED * RX_BUF_NUM);
445 flush_dcache_range((unsigned long)bufp,
446 (unsigned long)bufp + RX_BUF_SIZE_ALIGNED);
448 buflen = (descp->ctrl2 & D64_CTRL2_BC_MASK);
450 stat0 = readl(GMAC0_DMA_RX_STATUS0_ADDR);
451 stat1 = readl(GMAC0_DMA_RX_STATUS1_ADDR);
453 debug("bufp(0x%x) index(0x%x) buflen(0x%x) stat0(0x%x) stat1(0x%x)\n",
454 (uint32_t)bufp, index, buflen, stat0, stat1);
456 dma->cur_rx_index = (index + 1) & (RX_BUF_NUM - 1);
458 /* get buffer offset */
459 control = readl(GMAC0_DMA_RX_CTRL_ADDR);
460 offset = (control & D64_RC_RO_MASK) >> D64_RC_RO_SHIFT;
461 rcvlen = *(uint16_t *)bufp;
463 debug("Received %d bytes\n", rcvlen);
464 /* copy status into temp buf then copy data from rx buffer */
465 memcpy(statbuf, bufp, offset);
466 datap = (void *)((uint32_t)bufp + offset);
467 memcpy(buf, datap, rcvlen);
469 /* update descriptor that is being added back on ring */
470 descp->ctrl2 = RX_BUF_SIZE_ALIGNED;
471 descp->addrlow = (uint32_t)bufp;
473 /* flush descriptor */
474 flush_dcache_range((unsigned long)dma->rx_desc_aligned,
475 (unsigned long)dma->rx_desc_aligned +
476 DESCP_SIZE_ALIGNED * RX_BUF_NUM);
478 /* set the lastdscr for the rx ring */
479 writel(((uint32_t)descp) & D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
484 static int gmac_disable_dma(struct eth_dma *dma, int dir)
488 debug("%s enter\n", __func__);
490 if (dir == MAC_DMA_TX) {
491 /* address PR8249/PR7577 issue */
492 /* suspend tx DMA first */
493 writel(D64_XC_SE, GMAC0_DMA_TX_CTRL_ADDR);
494 SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
496 D64_XS0_XS_DISABLED) &&
497 (status != D64_XS0_XS_IDLE) &&
498 (status != D64_XS0_XS_STOPPED), 10000);
501 * PR2414 WAR: DMA engines are not disabled until
504 writel(0, GMAC0_DMA_TX_CTRL_ADDR);
505 SPINWAIT(((status = (readl(GMAC0_DMA_TX_STATUS0_ADDR) &
507 D64_XS0_XS_DISABLED), 10000);
509 /* wait for the last transaction to complete */
512 status = (status == D64_XS0_XS_DISABLED);
515 * PR2414 WAR: DMA engines are not disabled until
518 writel(0, GMAC0_DMA_RX_CTRL_ADDR);
519 SPINWAIT(((status = (readl(GMAC0_DMA_RX_STATUS0_ADDR) &
521 D64_RS0_RS_DISABLED), 10000);
523 status = (status == D64_RS0_RS_DISABLED);
529 static int gmac_enable_dma(struct eth_dma *dma, int dir)
533 debug("%s enter\n", __func__);
535 if (dir == MAC_DMA_TX) {
536 dma->cur_tx_index = 0;
539 * These bits 20:18 (burstLen) of control register can be
540 * written but will take effect only if these bits are
541 * valid. So this will not affect previous versions
542 * of the DMA. They will continue to have those bits set to 0.
544 control = readl(GMAC0_DMA_TX_CTRL_ADDR);
546 control |= D64_XC_XE;
547 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
548 control |= D64_XC_PD;
550 writel(control, GMAC0_DMA_TX_CTRL_ADDR);
552 /* initailize the DMA channel */
553 writel((uint32_t)(dma->tx_desc_aligned),
554 GMAC0_DMA_TX_ADDR_LOW_ADDR);
555 writel(0, GMAC0_DMA_TX_ADDR_HIGH_ADDR);
557 dma->cur_rx_index = 0;
559 control = (readl(GMAC0_DMA_RX_CTRL_ADDR) &
560 D64_RC_AE) | D64_RC_RE;
562 if ((g_dmactrlflags & DMA_CTRL_PEN) == 0)
563 control |= D64_RC_PD;
565 if (g_dmactrlflags & DMA_CTRL_ROC)
566 control |= D64_RC_OC;
569 * These bits 20:18 (burstLen) of control register can be
570 * written but will take effect only if these bits are
571 * valid. So this will not affect previous versions
572 * of the DMA. They will continue to have those bits set to 0.
574 control &= ~D64_RC_BL_MASK;
575 /* Keep default Rx burstlen */
576 control |= readl(GMAC0_DMA_RX_CTRL_ADDR) & D64_RC_BL_MASK;
577 control |= HWRXOFF << D64_RC_RO_SHIFT;
579 writel(control, GMAC0_DMA_RX_CTRL_ADDR);
582 * the rx descriptor ring should have
583 * the addresses set properly;
584 * set the lastdscr for the rx ring
586 writel(((uint32_t)(dma->rx_desc_aligned) +
587 (RX_BUF_NUM - 1) * RX_BUF_SIZE_ALIGNED) &
588 D64_XP_LD_MASK, GMAC0_DMA_RX_PTR_ADDR);
594 bool gmac_mii_busywait(unsigned int timeout)
598 while (timeout > 10) {
599 tmp = readl(GMAC_MII_CTRL_ADDR);
600 if (tmp & (1 << GMAC_MII_BUSY_SHIFT)) {
607 return tmp & (1 << GMAC_MII_BUSY_SHIFT);
610 int gmac_miiphy_read(struct mii_dev *bus, int phyaddr, int devad, int reg)
615 /* Busy wait timeout is 1ms */
616 if (gmac_mii_busywait(1000)) {
617 pr_err("%s: Prepare MII read: MII/MDIO busy\n", __func__);
622 tmp = GMAC_MII_DATA_READ_CMD;
623 tmp |= (phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
624 (reg << GMAC_MII_PHY_REG_SHIFT);
625 debug("MII read cmd 0x%x, phy 0x%x, reg 0x%x\n", tmp, phyaddr, reg);
626 writel(tmp, GMAC_MII_DATA_ADDR);
628 if (gmac_mii_busywait(1000)) {
629 pr_err("%s: MII read failure: MII/MDIO busy\n", __func__);
633 value = readl(GMAC_MII_DATA_ADDR) & 0xffff;
634 debug("MII read data 0x%x\n", value);
638 int gmac_miiphy_write(struct mii_dev *bus, int phyaddr, int devad, int reg,
643 /* Busy wait timeout is 1ms */
644 if (gmac_mii_busywait(1000)) {
645 pr_err("%s: Prepare MII write: MII/MDIO busy\n", __func__);
649 /* Write operation */
650 tmp = GMAC_MII_DATA_WRITE_CMD | (value & 0xffff);
651 tmp |= ((phyaddr << GMAC_MII_PHY_ADDR_SHIFT) |
652 (reg << GMAC_MII_PHY_REG_SHIFT));
653 debug("MII write cmd 0x%x, phy 0x%x, reg 0x%x, data 0x%x\n",
654 tmp, phyaddr, reg, value);
655 writel(tmp, GMAC_MII_DATA_ADDR);
657 if (gmac_mii_busywait(1000)) {
658 pr_err("%s: MII write failure: MII/MDIO busy\n", __func__);
665 void gmac_init_reset(void)
667 debug("%s enter\n", __func__);
669 /* set command config reg CC_SR */
670 reg32_set_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
671 udelay(GMAC_RESET_DELAY);
674 void gmac_clear_reset(void)
676 debug("%s enter\n", __func__);
678 /* clear command config reg CC_SR */
679 reg32_clear_bits(UNIMAC0_CMD_CFG_ADDR, CC_SR);
680 udelay(GMAC_RESET_DELAY);
683 static void gmac_enable_local(bool en)
687 debug("%s enter\n", __func__);
689 /* read command config reg */
690 cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
692 /* put mac in reset */
697 /* first deassert rx_ena and tx_ena while in reset */
698 cmdcfg &= ~(CC_RE | CC_TE);
699 /* write command config reg */
700 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
702 /* bring mac out of reset */
705 /* if not enable exit now */
709 /* enable the mac transmit and receive paths now */
712 cmdcfg |= (CC_RE | CC_TE);
714 /* assert rx_ena and tx_ena when out of reset to enable the mac */
715 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
720 int gmac_enable(void)
722 gmac_enable_local(1);
724 /* clear interrupts */
725 writel(I_INTMASK, GMAC0_INT_STATUS_ADDR);
/*
 * Disable the GMAC transmit and receive paths.
 *
 * Return: 0 on success.
 */
int gmac_disable(void)
{
	gmac_enable_local(0);

	return 0;
}
735 int gmac_set_speed(int speed, int duplex)
741 hd_ena = duplex ? 0 : CC_HD;
744 } else if (speed == 100) {
746 } else if (speed == 10) {
749 pr_err("%s: Invalid GMAC speed(%d)!\n", __func__, speed);
753 cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
754 cmdcfg &= ~(CC_ES_MASK | CC_HD);
755 cmdcfg |= ((speed_cfg << CC_ES_SHIFT) | hd_ena);
757 printf("Change GMAC speed to %dMB\n", speed);
758 debug("GMAC speed cfg 0x%x\n", cmdcfg);
759 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
764 int gmac_set_mac_addr(unsigned char *mac)
766 /* set our local address */
767 debug("GMAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
768 mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
769 writel(htonl(*(uint32_t *)mac), UNIMAC0_MAC_MSB_ADDR);
770 writew(htons(*(uint32_t *)&mac[4]), UNIMAC0_MAC_LSB_ADDR);
775 int gmac_mac_init(struct eth_device *dev)
777 struct eth_info *eth = (struct eth_info *)(dev->priv);
778 struct eth_dma *dma = &(eth->dma);
784 debug("%s enter\n", __func__);
786 /* Always use GMAC0 */
787 printf("Using GMAC%d\n", 0);
789 /* Reset AMAC0 core */
790 writel(0, AMAC0_IDM_RESET_ADDR);
791 tmp = readl(AMAC0_IO_CTRL_DIRECT_ADDR);
793 tmp &= ~(1 << AMAC0_IO_CTRL_CLK_250_SEL_SHIFT);
794 tmp |= (1 << AMAC0_IO_CTRL_GMII_MODE_SHIFT);
796 tmp &= ~(1 << AMAC0_IO_CTRL_DEST_SYNC_MODE_EN_SHIFT);
797 writel(tmp, AMAC0_IO_CTRL_DIRECT_ADDR);
801 * As AMAC is just reset, NO need?
802 * set eth_data into loopback mode to ensure no rx traffic
803 * gmac_loopback(eth_data, TRUE);
804 * ET_TRACE(("%s gmac loopback\n", __func__));
808 cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
809 cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML |
810 CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI |
812 cmdcfg |= (CC_PROM | CC_NLC | CC_CFE);
813 /* put mac in reset */
815 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
818 /* enable clear MIB on read */
819 reg32_set_bits(GMAC0_DEV_CTRL_ADDR, DC_MROR);
820 /* PHY: set smi_master to drive mdc_clk */
821 reg32_set_bits(GMAC0_PHY_CTRL_ADDR, PC_MTE);
823 /* clear persistent sw intstatus */
824 writel(0, GMAC0_INT_STATUS_ADDR);
826 if (dma_init(dma) < 0) {
827 pr_err("%s: GMAC dma_init failed\n", __func__);
832 printf("%s: Chip ID: 0x%x\n", __func__, chipid);
834 /* set switch bypass mode */
835 tmp = readl(SWITCH_GLOBAL_CONFIG_ADDR);
836 tmp |= (1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT);
839 /* tmp &= ~(1 << CDRU_SWITCH_BYPASS_SWITCH_SHIFT); */
841 writel(tmp, SWITCH_GLOBAL_CONFIG_ADDR);
843 tmp = readl(CRMU_CHIP_IO_PAD_CONTROL_ADDR);
844 tmp &= ~(1 << CDRU_IOMUX_FORCE_PAD_IN_SHIFT);
845 writel(tmp, CRMU_CHIP_IO_PAD_CONTROL_ADDR);
847 /* Set MDIO to internal GPHY */
848 tmp = readl(GMAC_MII_CTRL_ADDR);
849 /* Select internal MDC/MDIO bus*/
850 tmp &= ~(1 << GMAC_MII_CTRL_BYP_SHIFT);
851 /* select MDC/MDIO connecting to on-chip internal PHYs */
852 tmp &= ~(1 << GMAC_MII_CTRL_EXT_SHIFT);
854 * give bit[6:0](MDCDIV) with required divisor to set
855 * the MDC clock frequency, 66MHZ/0x1A=2.5MHZ
859 writel(tmp, GMAC_MII_CTRL_ADDR);
861 if (gmac_mii_busywait(1000)) {
862 pr_err("%s: Configure MDIO: MII/MDIO busy\n", __func__);
866 /* Configure GMAC0 */
867 /* enable one rx interrupt per received frame */
868 writel(1 << GMAC0_IRL_FRAMECOUNT_SHIFT, GMAC0_INTR_RECV_LAZY_ADDR);
870 /* read command config reg */
871 cmdcfg = readl(UNIMAC0_CMD_CFG_ADDR);
872 /* enable 802.3x tx flow control (honor received PAUSE frames) */
874 /* enable promiscuous mode */
876 /* Disable loopback mode */
879 cmdcfg &= ~(CC_ES_MASK | CC_HD);
880 /* Set to 1Gbps and full duplex by default */
881 cmdcfg |= (2 << CC_ES_SHIFT);
883 /* put mac in reset */
886 writel(cmdcfg, UNIMAC0_CMD_CFG_ADDR);
887 /* bring mac out of reset */
890 /* set max frame lengths; account for possible vlan tag */
891 writel(PKTSIZE + 32, UNIMAC0_FRM_LENGTH_ADDR);
900 int gmac_add(struct eth_device *dev)
902 struct eth_info *eth = (struct eth_info *)(dev->priv);
903 struct eth_dma *dma = &(eth->dma);
907 * Desc has to be 16-byte aligned. But for dcache flush it must be
908 * aligned to ARCH_DMA_MINALIGN.
910 tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
912 printf("%s: Failed to allocate TX desc Buffer\n", __func__);
916 dma->tx_desc_aligned = (void *)tmp;
917 debug("TX Descriptor Buffer: %p; length: 0x%x\n",
918 dma->tx_desc_aligned, DESCP_SIZE_ALIGNED * TX_BUF_NUM);
920 tmp = memalign(ARCH_DMA_MINALIGN, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
922 printf("%s: Failed to allocate TX Data Buffer\n", __func__);
923 free(dma->tx_desc_aligned);
926 dma->tx_buf = (uint8_t *)tmp;
927 debug("TX Data Buffer: %p; length: 0x%x\n",
928 dma->tx_buf, TX_BUF_SIZE_ALIGNED * TX_BUF_NUM);
930 /* Desc has to be 16-byte aligned */
931 tmp = memalign(ARCH_DMA_MINALIGN, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
933 printf("%s: Failed to allocate RX Descriptor\n", __func__);
934 free(dma->tx_desc_aligned);
938 dma->rx_desc_aligned = (void *)tmp;
939 debug("RX Descriptor Buffer: %p, length: 0x%x\n",
940 dma->rx_desc_aligned, DESCP_SIZE_ALIGNED * RX_BUF_NUM);
942 tmp = memalign(ARCH_DMA_MINALIGN, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
944 printf("%s: Failed to allocate RX Data Buffer\n", __func__);
945 free(dma->tx_desc_aligned);
947 free(dma->rx_desc_aligned);
950 dma->rx_buf = (uint8_t *)tmp;
951 debug("RX Data Buffer: %p; length: 0x%x\n",
952 dma->rx_buf, RX_BUF_SIZE_ALIGNED * RX_BUF_NUM);
956 eth->phy_interface = PHY_INTERFACE_MODE_GMII;
958 dma->tx_packet = gmac_tx_packet;
959 dma->check_tx_done = gmac_check_tx_done;
961 dma->check_rx_done = gmac_check_rx_done;
963 dma->enable_dma = gmac_enable_dma;
964 dma->disable_dma = gmac_disable_dma;
966 eth->miiphy_read = gmac_miiphy_read;
967 eth->miiphy_write = gmac_miiphy_write;
969 eth->mac_init = gmac_mac_init;
970 eth->disable_mac = gmac_disable;
971 eth->enable_mac = gmac_enable;
972 eth->set_mac_addr = gmac_set_mac_addr;
973 eth->set_mac_speed = gmac_set_speed;