// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2009
 * Marvell Semiconductor <www.marvell.com>
 * Written-by: Prafulla Wadaskar <[email protected]>
 *
 * (C) Copyright 2003
 * Ingo Assmus <[email protected]>
 *
 * based on - Driver for MV64360X ethernet ports
 * Copyright (C) 2002 [email protected]
 */

#include <dm.h>
#include <log.h>
#include <net.h>
#include <malloc.h>
#include <miiphy.h>
#include <wait_bit.h>
#include <asm/global_data.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/types.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/arch/cpu.h>

#if defined(CONFIG_ARCH_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ARCH_ORION5X)
#include <asm/arch/orion5x.h>
#endif

#include "mvgbe.h"

DECLARE_GLOBAL_DATA_PTR;

#define MV_PHY_ADR_REQUEST 0xee
#define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
#define MVGBE_PGADR_REG 22
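
/*
 * Note: MV_PHY_ADR_REQUEST is a driver-internal pseudo address. Passing it
 * as both the PHY address and the register offset makes the SMI callbacks
 * below access the controller's own PHY address register (regs->phyadr)
 * instead of performing a real MDIO transaction.
 */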

#if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
static int smi_wait_ready(struct mvgbe_device *dmvgbe)
{
	int ret;

	ret = wait_for_bit_le32(&MVGBE_SMI_REG, MVGBE_PHY_SMI_BUSY_MASK, false,
				MVGBE_PHY_SMI_TIMEOUT_MS, false);
	if (ret) {
		printf("Error: SMI busy timeout\n");
		return ret;
	}

	return 0;
}

static int __mvgbe_mdio_read(struct mvgbe_device *dmvgbe, int phy_adr,
			     int devad, int reg_ofs)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;
	u16 data = 0;

	/* Phyadr read request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
	    reg_ofs == MV_PHY_ADR_REQUEST) {
		data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return data;
	}
	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__func__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__func__, reg_ofs);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	/* wait till the read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;

	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__func__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));

	/* Wait for the data to update in the SMI register */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;

	data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);

	debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
	      data);

	return data;
}

/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Returns the 16-bit phy register value, or -EFAULT on error
 */
static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
			int reg_ofs)
{
	struct mvgbe_device *dmvgbe = bus->priv;

	return __mvgbe_mdio_read(dmvgbe, phy_adr, devad, reg_ofs);
}

static int __mvgbe_mdio_write(struct mvgbe_device *dmvgbe, int phy_adr,
			      int devad, int reg_ofs, u16 data)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;

	/* Phyadr write request */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
	    reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EFAULT;
	}

	/* wait till the SMI is not busy */
	if (smi_wait_ready(dmvgbe) < 0)
		return -EFAULT;

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/* write the smi register */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}

/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Returns 0 if the write succeeds, -EFAULT on error
 */
static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
			 int reg_ofs, u16 data)
{
	struct mvgbe_device *dmvgbe = bus->priv;

	return __mvgbe_mdio_write(dmvgbe, phy_adr, devad, reg_ofs, data);
}
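
/*
 * Usage sketch (illustrative only, not called from this file): once the bus
 * is registered in mvgbe_probe(), these callbacks are reached through the
 * generic miiphy layer, e.g.:
 *
 *	u16 val;
 *
 *	if (!miiphy_read(bus->name, phy_adr, MII_BMSR, &val))
 *		printf("BMSR = 0x%04x\n", val);
 *
 * The PHY address and register used above are placeholders.
 */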
#endif

/* Stop and check all queues */
static void stop_queue(u32 *qreg)
{
	u32 reg_data;

	reg_data = readl(qreg);

	if (reg_data & 0xFF) {
		/* Issue stop command for active channels only */
		writel((reg_data << 8), qreg);

		/* Wait for all queue activity to terminate. */
		do {
			/*
			 * Check port cause register that all queues
			 * are stopped
			 */
			reg_data = readl(qreg);
		} while (reg_data & 0xFF);
	}
}

/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according to the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR) */
	MVGBE_REG_WR(regs->barsz[param->win].size,
			(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA) */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
			(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR) */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/* Base address enable reg (BARER) */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}

static void set_dram_access(struct mvgbe_registers *regs)
{
	struct mvgbe_winparam win_param;
	int i;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		/* Set access parameters for DRAM bank i */
		win_param.win = i;	/* Use Ethernet window i */
		/* Window target - DDR */
		win_param.target = MVGBE_TARGET_DRAM;
		/* Enable full access */
		win_param.access_ctrl = EWIN_ACCESS_FULL;
		win_param.high_addr = 0;
		/* Get bank base and size */
		win_param.base_addr = gd->bd->bi_dram[i].start;
		win_param.size = gd->bd->bi_dram[i].size;
		if (win_param.size == 0)
			win_param.enable = 0;
		else
			win_param.enable = 1;	/* Enable the access */

		/* Enable DRAM bank */
		switch (i) {
		case 0:
			win_param.attrib = EBAR_DRAM_CS0;
			break;
		case 1:
			win_param.attrib = EBAR_DRAM_CS1;
			break;
		case 2:
			win_param.attrib = EBAR_DRAM_CS2;
			break;
		case 3:
			win_param.attrib = EBAR_DRAM_CS3;
			break;
		default:
			/* invalid bank, disable access */
			win_param.enable = 0;
			win_param.attrib = 0;
			break;
		}
		/* Set the access control for address window (EPAPR) RD/WR */
		set_access_control(regs, &win_param);
	}
}

/*
 * port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
 *
 * Go through all the DA filter tables (Unicast, Special Multicast & Other
 * Multicast) and set each entry to 0.
 */
static void port_init_mac_tables(struct mvgbe_registers *regs)
{
	int table_index;

	/* Clear DA filter unicast table (Ex_dFUT) */
	for (table_index = 0; table_index < 4; ++table_index)
		MVGBE_REG_WR(regs->dfut[table_index], 0);

	for (table_index = 0; table_index < 64; ++table_index) {
		/* Clear DA filter special multicast table (Ex_dFSMT) */
		MVGBE_REG_WR(regs->dfsmt[table_index], 0);
		/* Clear DA filter other multicast table (Ex_dFOMT) */
		MVGBE_REG_WR(regs->dfomt[table_index], 0);
	}
}

/*
 * port_uc_addr - Set the port unicast address table
 *
 * This function locates the proper entry in the Unicast table for the
 * specified MAC nibble and sets its properties according to function
 * parameters.
 * This function adds/removes MAC addresses from the port unicast address
 * table.
 *
 * @uc_nibble	Unicast MAC address last nibble.
 * @option	ACCEPT_MAC_ADDR = add address, REJECT_MAC_ADDR = remove address.
 *
 * RETURN: 1 if output succeeded. 0 if option parameter is invalid.
 */
static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
			int option)
{
	u32 unicast_reg;
	u32 tbl_offset;
	u32 reg_offset;

	/* Locate the Unicast table entry */
	uc_nibble = (0xf & uc_nibble);
	/* Register offset from unicast table base */
	tbl_offset = (uc_nibble / 4);
	/* Entry offset within the above register */
	reg_offset = uc_nibble % 4;

	switch (option) {
	case REJECT_MAC_ADDR:
		/*
		 * Clear accepts frame bit at specified unicast
		 * DA table entry
		 */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	case ACCEPT_MAC_ADDR:
		/* Set accepts frame bit at unicast DA filter table entry */
		unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
		unicast_reg &= (0xFF << (8 * reg_offset));
		unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
		MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
		break;
	default:
		return 0;
	}
	return 1;
}

/*
 * port_uc_addr_set - Set the port unicast address.
 */
static void port_uc_addr_set(struct mvgbe_device *dmvgbe, u8 *p_addr)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 mac_h;
	u32 mac_l;

	mac_l = (p_addr[4] << 8) | (p_addr[5]);
	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
		(p_addr[3] << 0);

	MVGBE_REG_WR(regs->macal, mac_l);
	MVGBE_REG_WR(regs->macah, mac_h);

	/* Accept frames of this address */
	port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
}

/*
 * mvgbe_init_rx_desc_ring - Carve out the Rx descriptor ring and buffers in memory.
 */
static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_rxdesc *p_rx_desc;
	int i;

	/* initialize the Rx descriptors ring */
	p_rx_desc = dmvgbe->p_rxdesc;
	for (i = 0; i < RINGSZ; i++) {
		p_rx_desc->cmd_sts =
			MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
		p_rx_desc->buf_size = PKTSIZE_ALIGN;
		p_rx_desc->byte_cnt = 0;
		p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
		if (i == (RINGSZ - 1)) {
			p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
		} else {
			p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
				((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
			p_rx_desc = p_rx_desc->nxtdesc_p;
		}
	}
	dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
}
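
/*
 * Ring layout (as set up above): RINGSZ descriptors chained via nxtdesc_p,
 * with the last one pointing back at the first, and each descriptor owning
 * one PKTSIZE_ALIGN slice of p_rxbuf. All descriptors start out owned by
 * the DMA engine.
 */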

static int __mvgbe_init(struct mvgbe_device *dmvgbe, u8 *enetaddr,
			const char *name)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(dmvgbe, enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
		| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

	return 0;
}

static void __mvgbe_halt(struct mvgbe_device *dmvgbe)
{
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoders */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Set port is not reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MII interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);
}

static int mvgbe_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);

	port_uc_addr_set(dev_get_priv(dev), pdata->enetaddr);

	return 0;
}

static int __mvgbe_send(struct mvgbe_device *dmvgbe, void *dataptr,
			int datasize)
{
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
				datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tx desc as the head of the zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}

static int __mvgbe_recv(struct mvgbe_device *dmvgbe, uchar **packetp)
{
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;
	unsigned char *data;
	int rx_bytes = 0;

	*packetp = NULL;

	/* wait until an rx packet is available or we hit the timeout */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT) {
			timeout++;
		} else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * If we received a packet without the first/last bits set,
	 * or the error summary bit is set,
	 * the packet needs to be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__func__);

	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
		      " upper layer (net_process_received_packet)\n",
		      __func__);

		data = (p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET);
		rx_bytes = (int)(p_rxdesc_curr->byte_cnt -
				 RX_BUF_OFFSET);

		*packetp = data;
	}
	/*
	 * free this descriptor and point to the next one in the ring
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return rx_bytes;
}

#if defined(CONFIG_PHYLIB)
static struct phy_device *__mvgbe_phy_init(struct udevice *dev,
					   struct mii_dev *bus,
					   phy_interface_t phy_interface,
					   int phyid)
{
	struct phy_device *phydev;

	/* Set phy address of the port */
	miiphy_write(dev->name, MV_PHY_ADR_REQUEST, MV_PHY_ADR_REQUEST,
		     phyid);

	/* Make sure the selected PHY page is 0 before connecting */
	miiphy_write(dev->name, phyid, MVGBE_PGADR_REG, 0);

	phydev = phy_connect(bus, phyid, dev, phy_interface);
	if (!phydev) {
		printf("phy_connect failed\n");
		return NULL;
	}

	phy_config(phydev);
	phy_startup(phydev);

	return phydev;
}
#endif /* CONFIG_PHYLIB */

static int mvgbe_alloc_buffers(struct mvgbe_device *dmvgbe)
{
	dmvgbe->p_rxdesc = memalign(PKTALIGN,
				    MV_RXQ_DESC_ALIGNED_SIZE * RINGSZ + 1);
	if (!dmvgbe->p_rxdesc)
		goto error1;

	dmvgbe->p_rxbuf = memalign(PKTALIGN,
				   RINGSZ * PKTSIZE_ALIGN + 1);
	if (!dmvgbe->p_rxbuf)
		goto error2;

	dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
	if (!dmvgbe->p_aligned_txbuf)
		goto error3;

	dmvgbe->p_txdesc = memalign(PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);
	if (!dmvgbe->p_txdesc)
		goto error4;

	return 0;

error4:
	free(dmvgbe->p_aligned_txbuf);
error3:
	free(dmvgbe->p_rxbuf);
error2:
	free(dmvgbe->p_rxdesc);
error1:
	return -ENOMEM;
}

static int mvgbe_port_is_fixed_link(struct mvgbe_device *dmvgbe)
{
	return dmvgbe->phyaddr > PHY_MAX_ADDR;
}

static int mvgbe_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	int ret;

	ret = __mvgbe_init(dmvgbe, pdata->enetaddr, dev->name);
	if (ret)
		return ret;

	if (!mvgbe_port_is_fixed_link(dmvgbe)) {
		dmvgbe->phydev = __mvgbe_phy_init(dev, dmvgbe->bus,
						  dmvgbe->phy_interface,
						  dmvgbe->phyaddr);
		if (!dmvgbe->phydev)
			return -ENODEV;
	}

	return 0;
}

static int mvgbe_send(struct udevice *dev, void *packet, int length)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_send(dmvgbe, packet, length);
}

static int mvgbe_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	return __mvgbe_recv(dmvgbe, packetp);
}

static void mvgbe_stop(struct udevice *dev)
{
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);

	__mvgbe_halt(dmvgbe);
}

static int mvgbe_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	struct mii_dev *bus;
	int ret;

	ret = mvgbe_alloc_buffers(dmvgbe);
	if (ret)
		return ret;

	dmvgbe->regs = (void __iomem *)pdata->iobase;

	bus = mdio_alloc();
	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = smi_reg_read;
	bus->write = smi_reg_write;
	/* use "%s" so a '%' in the device name cannot act as a format specifier */
	snprintf(bus->name, sizeof(bus->name), "%s", dev->name);
	bus->priv = dmvgbe;
	dmvgbe->bus = bus;

	ret = mdio_register(bus);
	if (ret < 0)
		return ret;

	return 0;
}

static const struct eth_ops mvgbe_ops = {
	.start = mvgbe_start,
	.send = mvgbe_send,
	.recv = mvgbe_recv,
	.stop = mvgbe_stop,
	.write_hwaddr = mvgbe_write_hwaddr,
};

static int mvgbe_of_to_plat(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct mvgbe_device *dmvgbe = dev_get_priv(dev);
	void *blob = (void *)gd->fdt_blob;
	int node = dev_of_offset(dev);
	int fl_node;
	int pnode;
	unsigned long addr;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = -1;

	pnode = fdt_node_offset_by_compatible(blob, node,
					      "marvell,kirkwood-eth-port");

	/* Get phy-mode / phy_interface from DT */
	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		pdata->phy_interface = PHY_INTERFACE_MODE_GMII;

	dmvgbe->phy_interface = pdata->phy_interface;

	/* fetch 'fixed-link' property */
	fl_node = fdt_subnode_offset(blob, pnode, "fixed-link");
	if (fl_node != -FDT_ERR_NOTFOUND) {
		/* set phy_addr to invalid value for fixed link */
		dmvgbe->phyaddr = PHY_MAX_ADDR + 1;
		dmvgbe->duplex = fdtdec_get_bool(blob, fl_node, "full-duplex");
		dmvgbe->speed = fdtdec_get_int(blob, fl_node, "speed", 0);
	} else {
		/* Now read phyaddr from DT */
		addr = fdtdec_lookup_phandle(blob, pnode, "phy-handle");
		if (addr > 0)
			dmvgbe->phyaddr = fdtdec_get_int(blob, addr, "reg", 0);
	}

	return 0;
}
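
/*
 * Rough sketch of the device tree shape this parser expects (illustrative
 * only; node names, unit addresses and the phy-handle target are
 * placeholders, see the Kirkwood .dtsi files for the authoritative binding):
 *
 *	ethernet-controller@72000 {
 *		compatible = "marvell,kirkwood-eth";
 *		...
 *		ethernet-port@0 {
 *			compatible = "marvell,kirkwood-eth-port";
 *			phy-handle = <&ethphy0>;  /- or a fixed-link subnode -/
 *		};
 *	};
 */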

static const struct udevice_id mvgbe_ids[] = {
	{ .compatible = "marvell,kirkwood-eth" },
	{ }
};

U_BOOT_DRIVER(mvgbe) = {
	.name = "mvgbe",
	.id = UCLASS_ETH,
	.of_match = mvgbe_ids,
	.of_to_plat = mvgbe_of_to_plat,
	.probe = mvgbe_probe,
	.ops = &mvgbe_ops,
	.priv_auto = sizeof(struct mvgbe_device),
	.plat_auto = sizeof(struct eth_pdata),
};