/*
 * (C) Copyright 2005-2006
 * Stefan Roese, DENX Software Engineering, [email protected].
 *
 * See file CREDITS for list of people who contributed to this
 * project.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */

#if 0
#define DEBUG		/* define for debug output */
#endif

#include <config.h>
#include <common.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <asm/processor.h>
#include <asm/arch-ixp/ixp425.h>

#include <IxOsal.h>
#include <IxEthAcc.h>
#include <IxEthDB.h>
#include <IxNpeDl.h>
#include <IxQMgr.h>
#include <IxNpeMh.h>
#include <ix_ossl.h>
#include <IxFeatureCtrl.h>

#include <npe.h>

#ifdef CONFIG_IXP4XX_NPE

static IxQMgrDispatcherFuncPtr qDispatcherFunc = NULL;
static int npe_exists[NPE_NUM_PORTS];
static int npe_used[NPE_NUM_PORTS];

/* A little extra so we can align to cacheline. */
static u8 npe_alloc_pool[NPE_MEM_POOL_SIZE + CFG_CACHELINE_SIZE - 1];
static u8 *npe_alloc_end;
static u8 *npe_alloc_free;

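/*
 * Trivial bump allocator on top of npe_alloc_pool: hands out cacheline
 * aligned chunks and never frees them, which is sufficient for the
 * one-time buffer setup done by this driver.
 */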
static void *npe_alloc(int size)
{
	static int count = 0;
	void *p = NULL;

	size = (size + (CFG_CACHELINE_SIZE-1)) & ~(CFG_CACHELINE_SIZE-1);
	count++;

	if ((npe_alloc_free + size) < npe_alloc_end) {
		p = npe_alloc_free;
		npe_alloc_free += size;
	} else {
		printf("%s: failed (count=%d, size=%d)!\n", __FUNCTION__, count, size);
	}
	return p;
}

/* Not interrupt safe! */
static void mbuf_enqueue(IX_OSAL_MBUF **q, IX_OSAL_MBUF *new)
{
	IX_OSAL_MBUF *m = *q;

	IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(new) = NULL;

	if (m) {
		while(IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m))
			m = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
		IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = new;
	} else
		*q = new;
}

/* Not interrupt safe! */
static IX_OSAL_MBUF *mbuf_dequeue(IX_OSAL_MBUF **q)
{
	IX_OSAL_MBUF *m = *q;
	if (m)
		*q = IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m);
	return m;
}

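/* Reinitialize all TX mbufs and queue them on the free TX chain. */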
static void reset_tx_mbufs(struct npe* p_npe)
{
	IX_OSAL_MBUF *m;
	int i;

	p_npe->txQHead = NULL;

	for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS; i++) {
		m = &p_npe->tx_mbufs[i];

		memset(m, 0, sizeof(*m));

		IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->tx_pkts[i * NPE_PKT_SIZE];
		IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
		mbuf_enqueue(&p_npe->txQHead, m);
	}
}

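/* Reinitialize all RX mbufs and hand them back to the NPE RX free queue. */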
static void reset_rx_mbufs(struct npe* p_npe)
{
	IX_OSAL_MBUF *m;
	int i;

	p_npe->rxQHead = NULL;

	HAL_DCACHE_INVALIDATE(p_npe->rx_pkts, NPE_PKT_SIZE *
			      CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);

	for (i = 0; i < CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS; i++) {
		m = &p_npe->rx_mbufs[i];

		memset(m, 0, sizeof(*m));

		IX_OSAL_MBUF_MDATA(m) = (void *)&p_npe->rx_pkts[i * NPE_PKT_SIZE];
		IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;

		if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
			printf("ixEthAccPortRxFreeReplenish failed for port %d\n", p_npe->eth_id);
			break;
		}
	}
}

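/* Allocate RX packet buffers and mbuf headers, then prime the RX free queue. */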
static void init_rx_mbufs(struct npe* p_npe)
{
	p_npe->rxQHead = NULL;

	p_npe->rx_pkts = npe_alloc(NPE_PKT_SIZE *
				   CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
	if (p_npe->rx_pkts == NULL) {
		printf("alloc of packets failed.\n");
		return;
	}

	p_npe->rx_mbufs = (IX_OSAL_MBUF *)
		npe_alloc(sizeof(IX_OSAL_MBUF) *
			  CONFIG_DEVS_ETH_INTEL_NPE_MAX_RX_DESCRIPTORS);
	if (p_npe->rx_mbufs == NULL) {
		printf("alloc of mbufs failed.\n");
		return;
	}

	reset_rx_mbufs(p_npe);
}

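/* Allocate TX packet buffers and mbuf headers, then build the free TX chain. */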
static void init_tx_mbufs(struct npe* p_npe)
{
	p_npe->tx_pkts = npe_alloc(NPE_PKT_SIZE *
				   CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
	if (p_npe->tx_pkts == NULL) {
		printf("alloc of packets failed.\n");
		return;
	}

	p_npe->tx_mbufs = (IX_OSAL_MBUF *)
		npe_alloc(sizeof(IX_OSAL_MBUF) *
			  CONFIG_DEVS_ETH_INTEL_NPE_MAX_TX_DESCRIPTORS);
	if (p_npe->tx_mbufs == NULL) {
		printf("alloc of mbufs failed.\n");
		return;
	}

	reset_tx_mbufs(p_npe);
}

/* Convert IX_ETH_PORT_n to IX_NPEMH_NPEID_NPEx */
static int __eth_to_npe(int eth_id)
{
	switch(eth_id) {
	case IX_ETH_PORT_1:
		return IX_NPEMH_NPEID_NPEB;

	case IX_ETH_PORT_2:
		return IX_NPEMH_NPEID_NPEC;

	case IX_ETH_PORT_3:
		return IX_NPEMH_NPEID_NPEA;
	}
	return 0;
}

/* Poll the CSR machinery. */
static void npe_poll(int eth_id)
{
	if (qDispatcherFunc != NULL) {
		ixNpeMhMessagesReceive(__eth_to_npe(eth_id));
		(*qDispatcherFunc)(IX_QMGR_QUELOW_GROUP);
	}
}

/* ethAcc RX callback */
static void npe_rx_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	struct npe* p_npe = (struct npe *)cbTag;

	if (IX_OSAL_MBUF_MLEN(m) > 0) {
		mbuf_enqueue(&p_npe->rxQHead, m);

		if (p_npe->rx_write == ((p_npe->rx_read-1) & (PKTBUFSRX-1))) {
			debug("Rx overflow: rx_write=%d rx_read=%d\n",
			      p_npe->rx_write, p_npe->rx_read);
		} else {
			debug("Received message #%d (len=%d)\n", p_npe->rx_write,
			      IX_OSAL_MBUF_MLEN(m));
			memcpy((void *)NetRxPackets[p_npe->rx_write], IX_OSAL_MBUF_MDATA(m),
			       IX_OSAL_MBUF_MLEN(m));
			p_npe->rx_len[p_npe->rx_write] = IX_OSAL_MBUF_MLEN(m);
			p_npe->rx_write++;
			if (p_npe->rx_write == PKTBUFSRX)
				p_npe->rx_write = 0;

#ifdef CONFIG_PRINT_RX_FRAMES
			{
				u8 *ptr = IX_OSAL_MBUF_MDATA(m);
				int i;

				for (i=0; i<60; i++) {
					debug("%02x ", *ptr++);
				}
				debug("\n");
			}
#endif
		}

		m = mbuf_dequeue(&p_npe->rxQHead);
	} else {
		debug("Received frame with length 0!!!\n");
		m = mbuf_dequeue(&p_npe->rxQHead);
	}

	/* Now return mbuf to NPE */
	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;

	if(ixEthAccPortRxFreeReplenish(p_npe->eth_id, m) != IX_SUCCESS) {
		debug("npe_rx_callback: Error returning mbuf.\n");
	}
}

/* ethAcc TX callback */
static void npe_tx_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	struct npe* p_npe = (struct npe *)cbTag;

	debug("%s\n", __FUNCTION__);

	IX_OSAL_MBUF_MLEN(m) = IX_OSAL_MBUF_PKT_LEN(m) = NPE_PKT_SIZE;
	IX_OSAL_MBUF_NEXT_BUFFER_IN_PKT_PTR(m) = NULL;
	IX_OSAL_MBUF_FLAGS(m) = 0;

	mbuf_enqueue(&p_npe->txQHead, m);
}

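/* Program the port's unicast MAC address from dev->enetaddr into the NPE. */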
static int npe_set_mac_address(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	IxEthAccMacAddr npeMac;

	debug("%s\n", __FUNCTION__);

	/* Set MAC address */
	memcpy(npeMac.macAddress, dev->enetaddr, 6);

	if (ixEthAccPortUnicastMacAddressSet(p_npe->eth_id, &npeMac) != IX_ETH_ACC_SUCCESS) {
		printf("Error setting unicast address! %02x:%02x:%02x:%02x:%02x:%02x\n",
		       npeMac.macAddress[0], npeMac.macAddress[1],
		       npeMac.macAddress[2], npeMac.macAddress[3],
		       npeMac.macAddress[4], npeMac.macAddress[5]);
		return 0;
	}

	return 1;
}

/* Boot-time CSR library initialization. */
static int npe_csr_load(void)
{
	int i;

	if (ixQMgrInit() != IX_SUCCESS) {
		debug("Error initialising queue manager!\n");
		return 0;
	}

	ixQMgrDispatcherLoopGet(&qDispatcherFunc);

	if(ixNpeMhInitialize(IX_NPEMH_NPEINTERRUPTS_YES) != IX_SUCCESS) {
		printf("Error initialising NPE Message handler!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_1] && npe_exists[IX_ETH_PORT_1] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-B!\n");
		return 0;
	}

	if (npe_used[IX_ETH_PORT_2] && npe_exists[IX_ETH_PORT_2] &&
	    ixNpeDlNpeInitAndStart(IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS)
	    != IX_SUCCESS) {
		printf("Error downloading firmware to NPE-C!\n");
		return 0;
	}

	/* don't need this for U-Boot */
	ixFeatureCtrlSwConfigurationWrite(IX_FEATURECTRL_ETH_LEARNING, FALSE);

	if (ixEthAccInit() != IX_ETH_ACC_SUCCESS) {
		printf("Error initialising Ethernet access driver!\n");
		return 0;
	}

	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (!npe_used[i] || !npe_exists[i])
			continue;
		if (ixEthAccPortInit(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error initialising Ethernet port%d!\n", i);
		}
		if (ixEthAccTxSchedulingDisciplineSet(i, FIFO_NO_PRIORITY) != IX_ETH_ACC_SUCCESS) {
			printf("Error setting scheduling discipline for port %d.\n", i);
		}
		if (ixEthAccPortRxFrameAppendFCSDisable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error disabling RX FCS for port %d.\n", i);
		}
		if (ixEthAccPortTxFrameAppendFCSEnable(i) != IX_ETH_ACC_SUCCESS) {
			printf("Error enabling TX FCS for port %d.\n", i);
		}
	}

	return 1;
}

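/*
 * U-Boot "init" entry point: wait for PHY autonegotiation, report the link,
 * set up the mbuf pools, register the RX/TX callbacks and enable the port.
 */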
static int npe_init(struct eth_device *dev, bd_t * bis)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;
	u16 reg_short;
	int speed;
	int duplex;

	debug("%s: 1\n", __FUNCTION__);

	miiphy_read (dev->name, p_npe->phy_no, PHY_BMSR, &reg_short);

	/*
	 * Wait if PHY is capable of autonegotiation and autonegotiation is not complete
	 */
	if ((reg_short & PHY_BMSR_AUTN_ABLE) && !(reg_short & PHY_BMSR_AUTN_COMP)) {
		puts ("Waiting for PHY auto negotiation to complete");
		i = 0;
		while (!(reg_short & PHY_BMSR_AUTN_COMP)) {
			/*
			 * Timeout reached ?
			 */
			if (i > PHY_AUTONEGOTIATE_TIMEOUT) {
				puts (" TIMEOUT !\n");
				break;
			}

			if ((i++ % 1000) == 0) {
				putc ('.');
				miiphy_read (dev->name, p_npe->phy_no, PHY_BMSR, &reg_short);
			}
			udelay (1000);	/* 1 ms */
		}
		puts (" done\n");
		udelay (500000);	/* another 500 ms (results in faster booting) */
	}

	speed = miiphy_speed (dev->name, p_npe->phy_no);
	duplex = miiphy_duplex (dev->name, p_npe->phy_no);

	if (p_npe->print_speed) {
		p_npe->print_speed = 0;
		printf ("ENET Speed is %d Mbps - %s duplex connection\n",
			(int) speed, (duplex == HALF) ? "HALF" : "FULL");
	}

	npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
	npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
				 CFG_CACHELINE_SIZE - 1) & ~(CFG_CACHELINE_SIZE - 1));

	/* initialize mbuf pool */
	init_rx_mbufs(p_npe);
	init_tx_mbufs(p_npe);

	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register RX callback!\n");
		return -1;
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		printf("can't register TX callback!\n");
		return -1;
	}

	npe_set_mac_address(dev);

	if (ixEthAccPortEnable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		printf("can't enable port!\n");
		return -1;
	}

	p_npe->active = 1;

	return 0;
}

#if 0	/* test-only: probably have to deal with it when booting linux (for a clean state) */
/* Uninitialize CSR library. */
static void npe_csr_unload(void)
{
	ixEthAccUnload();
	ixEthDBUnload();
	ixNpeMhUnload();
	ixQMgrUnload();
}

/* callback which is used by ethAcc to recover RX buffers when stopping */
static void npe_rx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m, IxEthAccPortId portid)
{
	debug("%s\n", __FUNCTION__);
}

/* callback which is used by ethAcc to recover TX buffers when stopping */
static void npe_tx_stop_callback(u32 cbTag, IX_OSAL_MBUF *m)
{
	debug("%s\n", __FUNCTION__);
}
#endif

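/*
 * U-Boot "halt" entry point: poll the CSR dispatcher for a while so that
 * outstanding mbufs can be recovered, then mark the port inactive.
 */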
static void npe_halt(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	int i;

	debug("%s\n", __FUNCTION__);

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(100);
	}

#if 0	/* test-only: probably have to deal with it when booting linux (for a clean state) */
	if (ixEthAccPortRxCallbackRegister(p_npe->eth_id, npe_rx_stop_callback,
					   (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering rx callback!\n");
	}

	if (ixEthAccPortTxDoneCallbackRegister(p_npe->eth_id, npe_tx_stop_callback,
					       (u32)p_npe) != IX_ETH_ACC_SUCCESS) {
		debug("Error registering tx callback!\n");
	}

	if (ixEthAccPortDisable(p_npe->eth_id) != IX_ETH_ACC_SUCCESS) {
		debug("npe_stop: Error disabling NPEB!\n");
	}

	/* Delay to give time for recovery of mbufs */
	for (i = 0; i < 100; i++) {
		npe_poll(p_npe->eth_id);
		udelay(10000);
	}

	/*
	 * For U-Boot only, we are probably launching Linux or other OS that
	 * needs a clean slate for its NPE library.
	 */
#if 0	/* test-only */
	for (i = 0; i < IX_ETH_ACC_NUMBER_OF_PORTS; i++) {
		if (npe_used[i] && npe_exists[i])
			if (ixNpeDlNpeStopAndReset(__eth_to_npe(i)) != IX_SUCCESS)
				printf("Failed to stop and reset NPE B.\n");
	}
#endif

#endif
	p_npe->active = 0;
}

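/*
 * U-Boot "send" entry point: copy the frame into a free TX mbuf, submit it
 * to ethAcc and poll the dispatcher so completed buffers are returned.
 */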
static int npe_send(struct eth_device *dev, volatile void *packet, int len)
{
	struct npe *p_npe = (struct npe *)dev->priv;
	u8 *dest;
	int err;
	IX_OSAL_MBUF *m;

	debug("%s\n", __FUNCTION__);
	m = mbuf_dequeue(&p_npe->txQHead);
	dest = IX_OSAL_MBUF_MDATA(m);
	IX_OSAL_MBUF_PKT_LEN(m) = IX_OSAL_MBUF_MLEN(m) = len;
	IX_OSAL_MBUF_NEXT_PKT_IN_CHAIN_PTR(m) = NULL;

	memcpy(dest, (char *)packet, len);

	if ((err = ixEthAccPortTxFrameSubmit(p_npe->eth_id, m, IX_ETH_ACC_TX_DEFAULT_PRIORITY))
	    != IX_ETH_ACC_SUCCESS) {
		printf("npe_send: Can't submit frame. err[%d]\n", err);
		mbuf_enqueue(&p_npe->txQHead, m);
		return 0;
	}

#ifdef DEBUG_PRINT_TX_FRAMES
	{
		u8 *ptr = IX_OSAL_MBUF_MDATA(m);
		int i;

		for (i=0; i<IX_OSAL_MBUF_MLEN(m); i++) {
			printf("%02x ", *ptr++);
		}
		printf(" (tx-len=%d)\n", IX_OSAL_MBUF_MLEN(m));
	}
#endif

	npe_poll(p_npe->eth_id);

	return len;
}

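/*
 * U-Boot "recv" entry point: run the dispatcher and pass every frame queued
 * by npe_rx_callback (ring between rx_read and rx_write) up to the net stack.
 */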
static int npe_rx(struct eth_device *dev)
{
	struct npe *p_npe = (struct npe *)dev->priv;

	debug("%s\n", __FUNCTION__);
	npe_poll(p_npe->eth_id);

	debug("%s: rx_write=%d rx_read=%d\n", __FUNCTION__, p_npe->rx_write, p_npe->rx_read);
	while (p_npe->rx_write != p_npe->rx_read) {
		debug("Reading message #%d\n", p_npe->rx_read);
		NetReceive(NetRxPackets[p_npe->rx_read], p_npe->rx_len[p_npe->rx_read]);
		p_npe->rx_read++;
		if (p_npe->rx_read == PKTBUFSRX)
			p_npe->rx_read = 0;
	}

	return 0;
}

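/*
 * Detect the available NPE Ethernet ports, load the CSR libraries once and
 * register one eth_device per usable port with the U-Boot network core.
 */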
int npe_initialize(bd_t * bis)
{
	static int virgin = 0;
	struct eth_device *dev;
	int eth_num = 0;
	struct npe *p_npe = NULL;

	for (eth_num = 0; eth_num < CFG_NPE_NUMS; eth_num++) {

		/* See if we can actually bring up the interface, otherwise, skip it */
		switch (eth_num) {
		default:		/* fall through */
		case 0:
			if (memcmp (bis->bi_enetaddr, "\0\0\0\0\0\0", 6) == 0) {
				continue;
			}
			break;
#ifdef CONFIG_HAS_ETH1
		case 1:
			if (memcmp (bis->bi_enet1addr, "\0\0\0\0\0\0", 6) == 0) {
				continue;
			}
			break;
#endif
		}

		/* Allocate device structure */
		dev = (struct eth_device *)malloc(sizeof(*dev));
		if (dev == NULL) {
			printf ("%s: Cannot allocate eth_device %d\n", __FUNCTION__, eth_num);
			return -1;
		}
		memset(dev, 0, sizeof(*dev));

		/* Allocate our private use data */
		p_npe = (struct npe *)malloc(sizeof(struct npe));
		if (p_npe == NULL) {
			printf("%s: Cannot allocate private hw data for eth_device %d",
			       __FUNCTION__, eth_num);
			free(dev);
			return -1;
		}
		memset(p_npe, 0, sizeof(struct npe));

		switch (eth_num) {
		default:		/* fall through */
		case 0:
			memcpy(dev->enetaddr, bis->bi_enetaddr, 6);
			p_npe->eth_id = 0;
			p_npe->phy_no = CONFIG_PHY_ADDR;
			break;

#ifdef CONFIG_HAS_ETH1
		case 1:
			memcpy(dev->enetaddr, bis->bi_enet1addr, 6);
			p_npe->eth_id = 1;
			p_npe->phy_no = CONFIG_PHY1_ADDR;
			break;
#endif
		}

		sprintf(dev->name, "NPE%d", eth_num);
		dev->priv = (void *)p_npe;
		dev->init = npe_init;
		dev->halt = npe_halt;
		dev->send = npe_send;
		dev->recv = npe_rx;

		p_npe->print_speed = 1;

		if (0 == virgin) {
			virgin = 1;

			if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP42X) {
				switch (ixFeatureCtrlProductIdRead() & IX_FEATURE_CTRL_SILICON_STEPPING_MASK) {
				case IX_FEATURE_CTRL_SILICON_TYPE_B0:
					/*
					 * If it is B0 Silicon, we only enable port when its corresponding
					 * Eth Coprocessor is available.
					 */
					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_1] = TRUE;

					if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
					    IX_FEATURE_CTRL_COMPONENT_ENABLED)
						npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				case IX_FEATURE_CTRL_SILICON_TYPE_A0:
					/*
					 * If it is A0 Silicon, we enable both as both Eth Coprocessors
					 * are available.
					 */
					npe_exists[IX_ETH_PORT_1] = TRUE;
					npe_exists[IX_ETH_PORT_2] = TRUE;
					break;
				}
			} else if (ixFeatureCtrlDeviceRead() == IX_FEATURE_CTRL_DEVICE_TYPE_IXP46X) {
				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH0) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_1] = TRUE;

				if (ixFeatureCtrlComponentCheck(IX_FEATURECTRL_ETH1) ==
				    IX_FEATURE_CTRL_COMPONENT_ENABLED)
					npe_exists[IX_ETH_PORT_2] = TRUE;
			}

			npe_used[IX_ETH_PORT_1] = 1;
			npe_used[IX_ETH_PORT_2] = 1;

			npe_alloc_end = npe_alloc_pool + sizeof(npe_alloc_pool);
			npe_alloc_free = (u8 *)(((unsigned)npe_alloc_pool +
						 CFG_CACHELINE_SIZE - 1)
						& ~(CFG_CACHELINE_SIZE - 1));

			if (!npe_csr_load())
				return 0;
		}

		eth_register(dev);

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		miiphy_register(dev->name, npe_miiphy_read, npe_miiphy_write);
#endif

	}			/* end for each supported device */

	return 1;
}

#endif /* CONFIG_IXP4XX_NPE */