// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2006-2011 Freescale Semiconductor, Inc.
 */

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/immap_qe.h>

#if !defined(CONFIG_DM_ETH)
/* Default UTBIPAR SMI address */
#ifndef CONFIG_UTBIPAR_INIT_TBIPA
#define CONFIG_UTBIPAR_INIT_TBIPA 0x1F
#endif

static struct uec_inf uec_info[] = {
#ifdef CONFIG_UEC_ETH1
	STD_UEC_INFO(1),	/* UEC1 */
#endif
#ifdef CONFIG_UEC_ETH2
	STD_UEC_INFO(2),	/* UEC2 */
#endif
#ifdef CONFIG_UEC_ETH3
	STD_UEC_INFO(3),	/* UEC3 */
#endif
#ifdef CONFIG_UEC_ETH4
	STD_UEC_INFO(4),	/* UEC4 */
#endif
#ifdef CONFIG_UEC_ETH5
	STD_UEC_INFO(5),	/* UEC5 */
#endif
#ifdef CONFIG_UEC_ETH6
	STD_UEC_INFO(6),	/* UEC6 */
#endif
#ifdef CONFIG_UEC_ETH7
	STD_UEC_INFO(7),	/* UEC7 */
#endif
#ifdef CONFIG_UEC_ETH8
	STD_UEC_INFO(8),	/* UEC8 */
#endif
};

#define MAXCONTROLLERS	(8)
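
/*
 * Registered UEC devices, indexed by UCC number; the MII helpers below
 * also look devices up here by name.
 */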
static struct eth_device *devlist[MAXCONTROLLERS];

static int uec_mac_enable(struct uec_priv *uec, comm_dir_e mode)
		printf("%s: uec not initialized\n", __func__);

	uec_regs = uec->uec_regs;
	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 |= MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 1;

	if (mode & COMM_DIR_RX) {
		maccfg1 |= MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 1;

static int uec_mac_disable(struct uec_priv *uec, comm_dir_e mode)
		printf("%s: uec not initialized\n", __func__);

	uec_regs = uec->uec_regs;
	maccfg1 = in_be32(&uec_regs->maccfg1);

	if (mode & COMM_DIR_TX) {
		maccfg1 &= ~MACCFG1_ENABLE_TX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_tx_enabled = 0;

	if (mode & COMM_DIR_RX) {
		maccfg1 &= ~MACCFG1_ENABLE_RX;
		out_be32(&uec_regs->maccfg1, maccfg1);
		uec->mac_rx_enabled = 0;

static int uec_graceful_stop_tx(struct uec_priv *uec)
	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);

	uf_regs = uec->uccf->uf_regs;

	/* Clear the grace stop event */
	out_be32(&uf_regs->ucce, UCCE_GRA);

	/* Issue host command */
	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	/* Wait for the command to complete */
	do {
		ucce = in_be32(&uf_regs->ucce);
	} while (!(ucce & UCCE_GRA));

	uec->grace_stopped_tx = 1;

static int uec_graceful_stop_rx(struct uec_priv *uec)
		printf("%s: No handle passed.\n", __func__);

	if (!uec->p_rx_glbl_pram) {
		printf("%s: Rx global parameter RAM not initialized\n",
		       __func__);

	/* Clear acknowledge bit */
	ack = uec->p_rx_glbl_pram->rxgstpack;
	ack &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
	uec->p_rx_glbl_pram->rxgstpack = ack;

	/* Keep issuing the command and checking the ack bit until it is asserted */
	do {
		/* Issue host command */
		cecr_subblock =
			ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
		qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
			     (u8)QE_CR_PROTOCOL_ETHERNET, 0);
		ack = uec->p_rx_glbl_pram->rxgstpack;
	} while (!(ack & GRACEFUL_STOP_ACKNOWLEDGE_RX));

	uec->grace_stopped_rx = 1;

static int uec_restart_tx(struct uec_priv *uec)
	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __func__);

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_tx = 0;

static int uec_restart_rx(struct uec_priv *uec)
	if (!uec || !uec->uec_info) {
		printf("%s: No handle passed.\n", __func__);

	cecr_subblock =
		ucc_fast_get_qe_cr_subblock(uec->uec_info->uf_info.ucc_num);
	qe_issue_cmd(QE_RESTART_RX, cecr_subblock,
		     (u8)QE_CR_PROTOCOL_ETHERNET, 0);

	uec->grace_stopped_rx = 0;

static int uec_open(struct uec_priv *uec, comm_dir_e mode)
	struct ucc_fast_priv *uccf;

	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);

	/* Check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);

	uec_mac_enable(uec, mode);

	/* Enable UCC fast */
	ucc_fast_enable(uccf, mode);

	/* RISC microcode start */
	if ((mode & COMM_DIR_TX) && uec->grace_stopped_tx)
		uec_restart_tx(uec);
	if ((mode & COMM_DIR_RX) && uec->grace_stopped_rx)
		uec_restart_rx(uec);

static int uec_stop(struct uec_priv *uec, comm_dir_e mode)
	if (!uec || !uec->uccf) {
		printf("%s: No handle passed.\n", __func__);

	/* Check if the UCC number is in range. */
	if (uec->uec_info->uf_info.ucc_num >= UCC_MAX_NUM) {
		printf("%s: ucc_num out of range.\n", __func__);

	/* Stop any transmissions */
	if ((mode & COMM_DIR_TX) && !uec->grace_stopped_tx)
		uec_graceful_stop_tx(uec);

	/* Stop any receptions */
	if ((mode & COMM_DIR_RX) && !uec->grace_stopped_rx)
		uec_graceful_stop_rx(uec);

	/* Disable the UCC fast */
	ucc_fast_disable(uec->uccf, mode);

	/* Disable the MAC */
	uec_mac_disable(uec, mode);

static int uec_set_mac_duplex(struct uec_priv *uec, int duplex)
		printf("%s: uec not initialized\n", __func__);

	uec_regs = uec->uec_regs;

	if (duplex == DUPLEX_HALF) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 &= ~MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);

	if (duplex == DUPLEX_FULL) {
		maccfg2 = in_be32(&uec_regs->maccfg2);
		maccfg2 |= MACCFG2_FDX;
		out_be32(&uec_regs->maccfg2, maccfg2);

static int uec_set_mac_if_mode(struct uec_priv *uec,
			       phy_interface_t if_mode, int speed)
	phy_interface_t enet_if_mode;

		printf("%s: uec not initialized\n", __func__);

	uec_regs = uec->uec_regs;
	enet_if_mode = if_mode;

	maccfg2 = in_be32(&uec_regs->maccfg2);
	maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK;

	upsmr = in_be32(&uec->uccf->uf_regs->upsmr);
	upsmr &= ~(UPSMR_RPM | UPSMR_TBIM | UPSMR_R10M | UPSMR_RMM);
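
	/*
	 * The speed-specific blocks below select the MACCFG2 interface width
	 * (nibble mode for 10/100, byte mode for 1000) and the matching UPSMR
	 * mode bits (RPM, R10M, RMM, TBIM) for the chosen PHY interface.
	 */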
		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_RGMII:
			upsmr |= (UPSMR_RPM | UPSMR_R10M);
		case PHY_INTERFACE_MODE_RMII:
			upsmr |= (UPSMR_R10M | UPSMR_RMM);

		maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_MII:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_RMII:

		maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;
		switch (enet_if_mode) {
		case PHY_INTERFACE_MODE_GMII:
		case PHY_INTERFACE_MODE_TBI:
		case PHY_INTERFACE_MODE_RTBI:
			upsmr |= (UPSMR_RPM | UPSMR_TBIM);
		case PHY_INTERFACE_MODE_RGMII_RXID:
		case PHY_INTERFACE_MODE_RGMII_TXID:
		case PHY_INTERFACE_MODE_RGMII_ID:
		case PHY_INTERFACE_MODE_RGMII:
		case PHY_INTERFACE_MODE_SGMII:

	out_be32(&uec_regs->maccfg2, maccfg2);
	out_be32(&uec->uccf->uf_regs->upsmr, upsmr);

static int init_mii_management_configuration(uec_mii_t *uec_mii_regs)
	uint timeout = 0x1000;

	miimcfg = in_be32(&uec_mii_regs->miimcfg);
	miimcfg |= MIIMCFG_MNGMNT_CLC_DIV_INIT_VALUE;
	out_be32(&uec_mii_regs->miimcfg, miimcfg);

	/* Wait until the bus is free */
	while ((in_be32(&uec_mii_regs->miimind) & MIIMIND_BUSY) && timeout--)
		;

		printf("%s: The MII Bus is stuck!", __func__);

static int init_phy(struct eth_device *dev)
	struct uec_priv *uec;
	uec_mii_t *umii_regs;
	struct uec_mii_info *mii_info;
	struct phy_info *curphy;

	uec = (struct uec_priv *)dev->priv;
	umii_regs = uec->uec_mii_regs;

	mii_info = malloc(sizeof(*mii_info));
		printf("%s: Could not allocate mii_info", dev->name);
	memset(mii_info, 0, sizeof(*mii_info));

	if (uec->uec_info->uf_info.eth_type == GIGA_ETH)
		mii_info->speed = SPEED_1000;
	else
		mii_info->speed = SPEED_100;

	mii_info->duplex = DUPLEX_FULL;

	mii_info->advertising = (ADVERTISED_10baseT_Half |
				 ADVERTISED_10baseT_Full |
				 ADVERTISED_100baseT_Half |
				 ADVERTISED_100baseT_Full |
				 ADVERTISED_1000baseT_Full);
	mii_info->autoneg = 1;
	mii_info->mii_id = uec->uec_info->phy_address;

	mii_info->mdio_read = &uec_read_phy_reg;
	mii_info->mdio_write = &uec_write_phy_reg;

	uec->mii_info = mii_info;

	qe_set_mii_clk_src(uec->uec_info->uf_info.ucc_num);

	if (init_mii_management_configuration(umii_regs)) {
		printf("%s: The MII Bus is stuck!", dev->name);

	/* Get info for this PHY */
	curphy = uec_get_phy_info(uec->mii_info);
		printf("%s: No PHY found", dev->name);

	mii_info->phyinfo = curphy;

	/* Run the commands which initialize the PHY */
	err = curphy->init(uec->mii_info);

static void adjust_link(struct eth_device *dev)
	struct uec_priv *uec = (struct uec_priv *)dev->priv;
	struct uec_mii_info *mii_info = uec->mii_info;

	if (mii_info->link) {
		/*
		 * Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (mii_info->duplex != uec->oldduplex) {
			if (!(mii_info->duplex)) {
				uec_set_mac_duplex(uec, DUPLEX_HALF);
				printf("%s: Half Duplex\n", dev->name);
			} else {
				uec_set_mac_duplex(uec, DUPLEX_FULL);
				printf("%s: Full Duplex\n", dev->name);
			uec->oldduplex = mii_info->duplex;

		if (mii_info->speed != uec->oldspeed) {
			phy_interface_t mode =
				uec->uec_info->enet_interface_type;
			if (uec->uec_info->uf_info.eth_type == GIGA_ETH) {
				switch (mii_info->speed) {
					printf("switching to rgmii 100\n");
					mode = PHY_INTERFACE_MODE_RGMII;
					printf("switching to rgmii 10\n");
					mode = PHY_INTERFACE_MODE_RGMII;
					printf("%s: Ack, Speed (%d) is illegal\n",
					       dev->name, mii_info->speed);

			change_phy_interface_mode(dev, mode, mii_info->speed);
			/* Change the MAC interface mode */
			uec_set_mac_if_mode(uec, mode, mii_info->speed);

			printf("%s: Speed %dBT\n", dev->name, mii_info->speed);
			uec->oldspeed = mii_info->speed;

		printf("%s: Link is up\n", dev->name);

	} else { /* if (mii_info->link) */
		printf("%s: Link is down\n", dev->name);

static void phy_change(struct eth_device *dev)
	struct uec_priv *uec = (struct uec_priv *)dev->priv;

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);

	/* QE9 and QE12 need to be set for enabling QE MII management signals */
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
	setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Update the link, speed, duplex */
	uec->mii_info->phyinfo->read_status(uec->mii_info);

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	/*
	 * QE12 is muxed with LBCTL; it needs to be released to enable the
	 * LBCTL signal for LBC usage.
	 */
	clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

	/* Adjust the interface according to speed */

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
/*
 * Find a device index from the devlist by name
 *
 * Returns:
 *   The index where the device is located, -1 on error
 */
static int uec_miiphy_find_dev_by_name(const char *devname)
	for (i = 0; i < MAXCONTROLLERS; i++) {
		if (strncmp(devname, devlist[i]->name, strlen(devname)) == 0)
			break;

	/* If the device cannot be found, return -1 */
	if (i == MAXCONTROLLERS) {
		debug("%s: device %s not found in devlist\n", __func__,
		      devname);

/*
 * Read a MII PHY register.
 */
static int uec_miiphy_read(struct mii_dev *bus, int addr, int devad, int reg)
	unsigned short value = 0;
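
	/* Note: 'devad' is unused; the UEC PHY accessors do plain Clause 22 accesses */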
		debug("%s: NULL pointer given\n", __func__);
		devindex = uec_miiphy_find_dev_by_name(bus->name);
		value = uec_read_phy_reg(devlist[devindex], addr, reg);

/*
 * Write a MII PHY register.
 */
static int uec_miiphy_write(struct mii_dev *bus, int addr, int devad, int reg,
			    u16 value)
		debug("%s: NULL pointer given\n", __func__);
		devindex = uec_miiphy_find_dev_by_name(bus->name);
		uec_write_phy_reg(devlist[devindex], addr, reg, value);

static int uec_set_mac_address(struct uec_priv *uec, u8 *mac_addr)
		printf("%s: uec not initialized\n", __func__);

	uec_regs = uec->uec_regs;

	/*
	 * For a station address of 0x12345678ABCD, write
	 * 0xCDAB7856 to MACSTNADDR1 and
	 * 0x34120000 to MACSTNADDR2.
	 */
	mac_addr1 = (mac_addr[5] << 24) | (mac_addr[4] << 16) |
		    (mac_addr[3] << 8) | (mac_addr[2]);
	out_be32(&uec_regs->macstnaddr1, mac_addr1);

	mac_addr2 = ((mac_addr[1] << 24) | (mac_addr[0] << 16)) & 0xffff0000;
	out_be32(&uec_regs->macstnaddr2, mac_addr2);

static int uec_convert_threads_num(enum uec_num_of_threads threads_num,
				   int *threads_num_ret)
	int num_threads_numerica;

	switch (threads_num) {
	case UEC_NUM_OF_THREADS_1:
		num_threads_numerica = 1;
		break;
	case UEC_NUM_OF_THREADS_2:
		num_threads_numerica = 2;
		break;
	case UEC_NUM_OF_THREADS_4:
		num_threads_numerica = 4;
		break;
	case UEC_NUM_OF_THREADS_6:
		num_threads_numerica = 6;
		break;
	case UEC_NUM_OF_THREADS_8:
		num_threads_numerica = 8;
		break;
	default:
		printf("%s: Bad number of threads value.",
		       __func__);

	*threads_num_ret = num_threads_numerica;

static void uec_init_tx_parameter(struct uec_priv *uec, int num_threads_tx)
	struct uec_inf *uec_info;

	uec_info = uec->uec_info;

	/* Allocate global Tx parameter RAM page */
	uec->tx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_tx_global_pram),
			       UEC_TX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_tx_glbl_pram = (struct uec_tx_global_pram *)
				qe_muram_addr(uec->tx_glbl_pram_offset);

	/* Zero the global Tx parameter RAM */
	memset(uec->p_tx_glbl_pram, 0, sizeof(struct uec_tx_global_pram));

	/* Init global Tx parameter RAM */

	/* TEMODER, RMON statistics disable, one Tx queue */
	out_be16(&uec->p_tx_glbl_pram->temoder, TEMODER_INIT_VALUE);

	uec->send_q_mem_reg_offset =
		qe_muram_alloc(sizeof(struct uec_send_queue_qd),
			       UEC_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT);
	uec->p_send_q_mem_reg = (struct uec_send_queue_mem_region *)
				qe_muram_addr(uec->send_q_mem_reg_offset);
	out_be32(&uec->p_tx_glbl_pram->sqptr, uec->send_q_mem_reg_offset);

	/* Set up the table with the TxBD ring */
	end_bd = (u32)uec->p_tx_bd_ring + (uec_info->tx_bd_ring_len - 1)
		 * SIZEOFBD;
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].bd_ring_base,
		 (u32)(uec->p_tx_bd_ring));
	out_be32(&uec->p_send_q_mem_reg->sqqd[0].last_bd_completed_address,
		 (u32)end_bd);

	/* Scheduler base pointer: we have only one Tx queue, so it is unused */
	out_be32(&uec->p_tx_glbl_pram->schedulerbasepointer, 0);

	/* TxRMON base pointer: TxRMON is disabled, so it is unused */
	out_be32(&uec->p_tx_glbl_pram->txrmonbaseptr, 0);

	/* TSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_be32(&uec->p_tx_glbl_pram->tstate, ((u32)(bmrx) << BMR_SHIFT));

	for (i = 0; i < MAX_IPH_OFFSET_ENTRY; i++)
		out_8(&uec->p_tx_glbl_pram->iphoffset[i], 0);

	for (i = 0; i < UEC_TX_VTAG_TABLE_ENTRY_MAX; i++)
		out_be32(&uec->p_tx_glbl_pram->vtagtable[i], 0);

	uec->thread_dat_tx_offset =
		qe_muram_alloc(num_threads_tx *
			       sizeof(struct uec_thread_data_tx) +
			       32 * (num_threads_tx == 1),
			       UEC_THREAD_DATA_ALIGNMENT);
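	/*
	 * Note: when only one Tx thread is used, 32 extra bytes are reserved
	 * on top of the per-thread data area (the Linux ucc_geth driver does
	 * the same); presumably slack required by the QE microcode.
	 */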
	uec->p_thread_data_tx = (struct uec_thread_data_tx *)
				qe_muram_addr(uec->thread_dat_tx_offset);
	out_be32(&uec->p_tx_glbl_pram->tqptr, uec->thread_dat_tx_offset);

static void uec_init_rx_parameter(struct uec_priv *uec, int num_threads_rx)
	struct uec_82xx_add_filtering_pram *p_af_pram;

	/* Allocate global Rx parameter RAM page */
	uec->rx_glbl_pram_offset =
		qe_muram_alloc(sizeof(struct uec_rx_global_pram),
			       UEC_RX_GLOBAL_PRAM_ALIGNMENT);
	uec->p_rx_glbl_pram = (struct uec_rx_global_pram *)
				qe_muram_addr(uec->rx_glbl_pram_offset);

	/* Zero the global Rx parameter RAM */
	memset(uec->p_rx_glbl_pram, 0, sizeof(struct uec_rx_global_pram));

	/* Init global Rx parameter RAM */
	/*
	 * REMODER: extended feature mode disabled, VLAN disabled,
	 * lossless flow control disabled, receive firmware statistics
	 * disabled, extended address parsing mode disabled, one Rx queue,
	 * dynamic maximum/minimum frame length disabled, IP checksum check
	 * disabled, IP address alignment disabled
	 */
	out_be32(&uec->p_rx_glbl_pram->remoder, REMODER_INIT_VALUE);

	uec->thread_dat_rx_offset =
		qe_muram_alloc(num_threads_rx *
			       sizeof(struct uec_thread_data_rx),
			       UEC_THREAD_DATA_ALIGNMENT);
	uec->p_thread_data_rx = (struct uec_thread_data_rx *)
				qe_muram_addr(uec->thread_dat_rx_offset);
	out_be32(&uec->p_rx_glbl_pram->rqptr, uec->thread_dat_rx_offset);

	out_be16(&uec->p_rx_glbl_pram->typeorlen, 3072);

	/* RxRMON base pointer: RxRMON is not used */
	out_be32(&uec->p_rx_glbl_pram->rxrmonbaseptr, 0);

	/* IntCoalescingPTR: not needed, interrupts are not used */
	out_be32(&uec->p_rx_glbl_pram->intcoalescingptr, 0);

	/* RSTATE, global snooping, big endian, the CSB bus selected */
	bmrx = BMR_INIT_VALUE;
	out_8(&uec->p_rx_glbl_pram->rstate, bmrx);

	out_be16(&uec->p_rx_glbl_pram->mrblr, MAX_RXBUF_LEN);

	uec->rx_bd_qs_tbl_offset =
		qe_muram_alloc(sizeof(struct uec_rx_bd_queues_entry) +
			       sizeof(struct uec_rx_pref_bds),
			       UEC_RX_BD_QUEUES_ALIGNMENT);
	uec->p_rx_bd_qs_tbl = (struct uec_rx_bd_queues_entry *)
				qe_muram_addr(uec->rx_bd_qs_tbl_offset);

	memset(uec->p_rx_bd_qs_tbl, 0, sizeof(struct uec_rx_bd_queues_entry) +
	       sizeof(struct uec_rx_pref_bds));
	out_be32(&uec->p_rx_glbl_pram->rbdqptr, uec->rx_bd_qs_tbl_offset);
	out_be32(&uec->p_rx_bd_qs_tbl->externalbdbaseptr,
		 (u32)uec->p_rx_bd_ring);

	out_be16(&uec->p_rx_glbl_pram->mflr, MAX_FRAME_LEN);
	out_be16(&uec->p_rx_glbl_pram->minflr, MIN_FRAME_LEN);
	out_be16(&uec->p_rx_glbl_pram->maxd1, MAX_DMA1_LEN);
	out_be16(&uec->p_rx_glbl_pram->maxd2, MAX_DMA2_LEN);
	out_be32(&uec->p_rx_glbl_pram->ecamptr, 0);
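
	/*
	 * Clear the L2/L3 priority queue mapping tables: with a single Rx
	 * queue, all traffic goes to queue 0.
	 */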
	out_be32(&uec->p_rx_glbl_pram->l2qt, 0);
	for (i = 0; i < 8; i++)
		out_be32(&uec->p_rx_glbl_pram->l3qt[i], 0);

	out_be16(&uec->p_rx_glbl_pram->vlantype, 0x8100);
	out_be16(&uec->p_rx_glbl_pram->vlantci, 0);

	/* Clear PQ2 style address filtering hash table */
	p_af_pram = (struct uec_82xx_add_filtering_pram *)
			uec->p_rx_glbl_pram->addressfiltering;

	p_af_pram->iaddr_h = 0;
	p_af_pram->iaddr_l = 0;
	p_af_pram->gaddr_h = 0;
	p_af_pram->gaddr_l = 0;

static int uec_issue_init_enet_rxtx_cmd(struct uec_priv *uec,
					int thread_tx, int thread_rx)
	struct uec_init_cmd_pram *p_init_enet_param;
	u32 init_enet_param_offset;
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Allocate init enet command parameter */
	uec->init_enet_param_offset =
		qe_muram_alloc(sizeof(struct uec_init_cmd_pram), 4);
	init_enet_param_offset = uec->init_enet_param_offset;
	uec->p_init_enet_param = (struct uec_init_cmd_pram *)
				qe_muram_addr(uec->init_enet_param_offset);

	/* Zero the init enet command struct */
	memset((void *)uec->p_init_enet_param, 0,
	       sizeof(struct uec_init_cmd_pram));

	/* Init the command struct */
	p_init_enet_param = uec->p_init_enet_param;
	p_init_enet_param->resinit0 = ENET_INIT_PARAM_MAGIC_RES_INIT0;
	p_init_enet_param->resinit1 = ENET_INIT_PARAM_MAGIC_RES_INIT1;
	p_init_enet_param->resinit2 = ENET_INIT_PARAM_MAGIC_RES_INIT2;
	p_init_enet_param->resinit3 = ENET_INIT_PARAM_MAGIC_RES_INIT3;
	p_init_enet_param->resinit4 = ENET_INIT_PARAM_MAGIC_RES_INIT4;
	p_init_enet_param->largestexternallookupkeysize = 0;

	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_rx)
					     << ENET_INIT_PARAM_RGF_SHIFT;
	p_init_enet_param->rgftgfrxglobal |= ((u32)uec_info->num_threads_tx)
					     << ENET_INIT_PARAM_TGF_SHIFT;

	/* Init Rx global parameter pointer */
	p_init_enet_param->rgftgfrxglobal |= uec->rx_glbl_pram_offset |
					     (u32)uec_info->risc_rx;

	/* Init Rx threads */
	for (i = 0; i < (thread_rx + 1); i++) {
		snum = qe_get_snum();
			printf("%s: cannot get snum\n", __func__);

		off = qe_muram_alloc(sizeof(struct uec_thread_rx_pram),
				     UEC_THREAD_RX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_rx;
		p_init_enet_param->rxthread[i] = entry_val;

	/* Init Tx global parameter pointer */
	p_init_enet_param->txglobal = uec->tx_glbl_pram_offset |
				      (u32)uec_info->risc_tx;

	/* Init Tx threads */
	for (i = 0; i < thread_tx; i++) {
		snum = qe_get_snum();
			printf("%s: cannot get snum\n", __func__);

		off = qe_muram_alloc(sizeof(struct uec_thread_tx_pram),
				     UEC_THREAD_TX_PRAM_ALIGNMENT);

		entry_val = ((u32)snum << ENET_INIT_PARAM_SNUM_SHIFT) |
			    off | (u32)uec_info->risc_tx;
		p_init_enet_param->txthread[i] = entry_val;

	__asm__ __volatile__("sync");

	/* Issue QE command */
	command = QE_INIT_TX_RX;
	cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
	qe_issue_cmd(command, cecr_subblock, (u8)QE_CR_PROTOCOL_ETHERNET,
		     init_enet_param_offset);

static int uec_startup(struct uec_priv *uec)
	struct uec_inf *uec_info;
	struct ucc_fast_inf *uf_info;
	struct ucc_fast_priv *uccf;
	struct buffer_descriptor *bd;

	if (!uec || !uec->uec_info) {
		printf("%s: uec or uec_info not initialized\n", __func__);

	uec_info = uec->uec_info;
	uf_info = &uec_info->uf_info;

	/* Check if the Rx BD ring length is illegal */
	if (uec_info->rx_bd_ring_len < UEC_RX_BD_RING_SIZE_MIN ||
	    (uec_info->rx_bd_ring_len % UEC_RX_BD_RING_SIZE_ALIGNMENT)) {
		printf("%s: Rx BD ring length must be a multiple of 4 and at least 8.\n",
		       __func__);

	/* Check if the Tx BD ring length is illegal */
	if (uec_info->tx_bd_ring_len < UEC_TX_BD_RING_SIZE_MIN) {
		printf("%s: Tx BD ring length must not be smaller than 2.\n",
		       __func__);

	/* Check if MRBLR is illegal */
	if (MAX_RXBUF_LEN == 0 || MAX_RXBUF_LEN % UEC_MRBLR_ALIGNMENT) {
		printf("%s: max Rx buffer length must be a multiple of 128.\n",
		       __func__);

	/* Both Rx and Tx are stopped */
	uec->grace_stopped_rx = 1;
	uec->grace_stopped_tx = 1;

	if (ucc_fast_init(uf_info, &uccf)) {
		printf("%s: failed to init ucc fast\n", __func__);

	/* Convert the Tx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_tx,
				    &num_threads_tx)) {

	/* Convert the Rx threads number */
	if (uec_convert_threads_num(uec_info->num_threads_rx,
				    &num_threads_rx)) {

	uf_regs = uccf->uf_regs;

	/* The UEC registers follow the UCC fast registers */
	uec_regs = (uec_t *)(&uf_regs->ucc_eth);

	/* Save the UEC register pointer to the UEC private struct */
	uec->uec_regs = uec_regs;

	/* Init UPSMR, enable hardware statistics (UCC) */
	out_be32(&uec->uccf->uf_regs->upsmr, UPSMR_INIT_VALUE);

	/* Init MACCFG1, flow control disable, disable Tx and Rx */
	out_be32(&uec_regs->maccfg1, MACCFG1_INIT_VALUE);

	/* Init MACCFG2, length check, MAC PAD and CRC enable */
	out_be32(&uec_regs->maccfg2, MACCFG2_INIT_VALUE);

	/* Set up the MAC interface mode */
	uec_set_mac_if_mode(uec, uec_info->enet_interface_type,
			    uec_info->speed);

	/* Set up the MII management base */
#ifndef CONFIG_eTSEC_MDIO_BUS
	uec->uec_mii_regs = (uec_mii_t *)(&uec_regs->miimcfg);
#else
	uec->uec_mii_regs = (uec_mii_t *)CONFIG_MIIM_ADDRESS;
#endif

	/* Set up the MII master clock source */
	qe_set_mii_clk_src(uec_info->uf_info.ucc_num);

	utbipar = in_be32(&uec_regs->utbipar);
	utbipar &= ~UTBIPAR_PHY_ADDRESS_MASK;

	/*
	 * Initialize the UTBIPAR address to CONFIG_UTBIPAR_INIT_TBIPA for ALL
	 * UECs. This frees up the remaining SMI addresses for use.
	 */
	utbipar |= CONFIG_UTBIPAR_INIT_TBIPA << UTBIPAR_PHY_ADDRESS_SHIFT;
	out_be32(&uec_regs->utbipar, utbipar);

	/* Configure the TBI for SGMII operation */
	if (uec->uec_info->enet_interface_type == PHY_INTERFACE_MODE_SGMII &&
	    uec->uec_info->speed == SPEED_1000) {
		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_ANA, TBIANA_SETTINGS);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_TBICON, TBICON_CLK_SELECT);

		uec_write_phy_reg(uec->dev, uec_regs->utbipar,
				  ENET_TBI_MII_CR, TBICR_SETTINGS);

	/* Allocate Tx BDs */
	length = ((uec_info->tx_bd_ring_len * SIZEOFBD) /
		  UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) *
		 UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;
	if ((uec_info->tx_bd_ring_len * SIZEOFBD) %
	    UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) {
		length += UEC_TX_BD_RING_SIZE_MEMORY_ALIGNMENT;

	align = UEC_TX_BD_RING_ALIGNMENT;
	uec->tx_bd_ring_offset = (u32)malloc((u32)(length + align));
	if (uec->tx_bd_ring_offset != 0) {
		uec->p_tx_bd_ring = (u8 *)((uec->tx_bd_ring_offset + align)
					   & ~(align - 1));

	/* Zero all of the Tx BDs */
	memset((void *)(uec->tx_bd_ring_offset), 0, length + align);

	/* Allocate Rx BDs */
	length = uec_info->rx_bd_ring_len * SIZEOFBD;
	align = UEC_RX_BD_RING_ALIGNMENT;
	uec->rx_bd_ring_offset = (u32)(malloc((u32)(length + align)));
	if (uec->rx_bd_ring_offset != 0) {
		uec->p_rx_bd_ring = (u8 *)((uec->rx_bd_ring_offset + align)
					   & ~(align - 1));

	/* Zero all of the Rx BDs */
	memset((void *)(uec->rx_bd_ring_offset), 0, length + align);

	/* Allocate the Rx buffer */
	length = uec_info->rx_bd_ring_len * MAX_RXBUF_LEN;
	align = UEC_RX_DATA_BUF_ALIGNMENT;
	uec->rx_buf_offset = (u32)malloc(length + align);
	if (uec->rx_buf_offset != 0) {
		uec->p_rx_buf = (u8 *)((uec->rx_buf_offset + align)
				       & ~(align - 1));

	/* Zero all of the Rx buffer */
	memset((void *)(uec->rx_buf_offset), 0, length + align);

	/* Init the TxBD ring */
	bd = (struct buffer_descriptor *)uec->p_tx_bd_ring;
	for (i = 0; i < uec_info->tx_bd_ring_len; i++) {
		BD_STATUS_SET(bd, 0);
		BD_LENGTH_SET(bd, 0);

	BD_STATUS_SET((--bd), TX_BD_WRAP);

	/* Init the RxBD ring */
	bd = (struct buffer_descriptor *)uec->p_rx_bd_ring;
	buf = uec->p_rx_buf;
	for (i = 0; i < uec_info->rx_bd_ring_len; i++) {
		BD_DATA_SET(bd, buf);
		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, RX_BD_EMPTY);
		buf += MAX_RXBUF_LEN;

	BD_STATUS_SET((--bd), RX_BD_WRAP | RX_BD_EMPTY);

	/* Init global Tx parameter RAM */
	uec_init_tx_parameter(uec, num_threads_tx);

	/* Init global Rx parameter RAM */
	uec_init_rx_parameter(uec, num_threads_rx);

	/* Init ethernet Tx and Rx parameter command */
	if (uec_issue_init_enet_rxtx_cmd(uec, num_threads_tx,
					 num_threads_rx)) {
		printf("%s: issue init enet cmd failed\n", __func__);

static int uec_init(struct eth_device *dev, struct bd_info *bd)
	struct uec_priv *uec;
	struct phy_info *curphy;
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
#endif

	uec = (struct uec_priv *)dev->priv;

	if (!uec->the_first_run) {
#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
		/*
		 * QE9 and QE12 need to be set for enabling QE MII
		 * management signals
		 */
		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE9);
		setbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

		err = init_phy(dev);
			printf("%s: Cannot initialize PHY, aborting.\n",
			       dev->name);

		curphy = uec->mii_info->phyinfo;

		if (curphy->config_aneg) {
			err = curphy->config_aneg(uec->mii_info);
				printf("%s: Can't negotiate PHY\n", dev->name);

		/* Give PHYs up to 5 sec to report a link */
			err = curphy->read_status(uec->mii_info);
			if (!(((i-- > 0) && !uec->mii_info->link) || err))
				break;

#if defined(CONFIG_ARCH_P1021) || defined(CONFIG_ARCH_P1025)
		/* QE12 needs to be released to enable the LBCTL signal */
		clrbits_be32(&gur->pmuxcr, MPC85xx_PMUXCR_QE12);
#endif

			printf("warning: %s: timeout on PHY link\n", dev->name);

		uec->the_first_run = 1;

	/* Set up the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n",
		       dev->name);

	uec_set_mac_address(uec, dev->enetaddr);

	err = uec_open(uec, COMM_DIR_RX_AND_TX);
		printf("%s: cannot enable UEC device\n", dev->name);

	return uec->mii_info->link ? 0 : -1;

static void uec_halt(struct eth_device *dev)
	struct uec_priv *uec = (struct uec_priv *)dev->priv;

	uec_stop(uec, COMM_DIR_RX_AND_TX);

static int uec_send(struct eth_device *dev, void *buf, int len)
	struct uec_priv *uec;
	struct ucc_fast_priv *uccf;
	struct buffer_descriptor *bd;

	uec = (struct uec_priv *)dev->priv;

	/* Find an empty TxBD */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
			printf("%s: tx buffer not ready\n", dev->name);

	BD_DATA_SET(bd, buf);
	BD_LENGTH_SET(bd, len);
	status = BD_STATUS(bd);
	status |= (TX_BD_READY | TX_BD_LAST);
	BD_STATUS_SET(bd, status);

	/* Tell the UCC to transmit the buffer */
	ucc_fast_transmit_on_demand(uccf);

	/* Wait for the buffer to be transmitted */
	for (i = 0; BD_STATUS(bd) & TX_BD_READY; i++) {
			printf("%s: tx error\n", dev->name);

	/* OK, the buffer has been transmitted */
	BD_ADVANCE(bd, status, uec->p_tx_bd_ring);

static int uec_recv(struct eth_device *dev)
	struct uec_priv *uec = dev->priv;
	struct buffer_descriptor *bd;

	status = BD_STATUS(bd);

	while (!(status & RX_BD_EMPTY)) {
		if (!(status & RX_BD_ERROR)) {
			len = BD_LENGTH(bd);
			net_process_received_packet(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);

		BD_LENGTH_SET(bd, 0);
		BD_STATUS_SET(bd, status | RX_BD_EMPTY);
		BD_ADVANCE(bd, status, uec->p_rx_bd_ring);
		status = BD_STATUS(bd);

int uec_initialize(struct bd_info *bis, struct uec_inf *uec_info)
	struct eth_device *dev;
	struct uec_priv *uec;

	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	memset(dev, 0, sizeof(struct eth_device));

	/* Allocate the UEC private struct */
	uec = (struct uec_priv *)malloc(sizeof(struct uec_priv));
	memset(uec, 0, sizeof(struct uec_priv));

	/* Adjust uec_info */
#if (MAX_QE_RISC == 4)
	uec_info->risc_tx = QE_RISC_ALLOCATION_FOUR_RISCS;
	uec_info->risc_rx = QE_RISC_ALLOCATION_FOUR_RISCS;
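	/* With four QE RISC engines available, allow Rx and Tx to use any of them */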
#endif

	devlist[uec_info->uf_info.ucc_num] = dev;

	uec->uec_info = uec_info;

	sprintf(dev->name, "UEC%d", uec_info->uf_info.ucc_num);

	dev->priv = (void *)uec;
	dev->init = uec_init;
	dev->halt = uec_halt;
	dev->send = uec_send;
	dev->recv = uec_recv;

	/* Clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;

	err = uec_startup(uec);
		printf("%s: Cannot configure net device, aborting.", dev->name);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
	struct mii_dev *mdiodev = mdio_alloc();

	strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
	mdiodev->read = uec_miiphy_read;
	mdiodev->write = uec_miiphy_write;

	retval = mdio_register(mdiodev);

int uec_eth_init(struct bd_info *bis, struct uec_inf *uecs, int num)
	for (i = 0; i < num; i++)
		uec_initialize(bis, &uecs[i]);

int uec_standard_init(struct bd_info *bis)
	return uec_eth_init(bis, uec_info, ARRAY_SIZE(uec_info));