/*
 * Copyright (C) 2006-2009 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Shlomi Gridish <[email protected]>
 *	   Li Yang <[email protected]>
 *
 * Description:
 * QE UCC Gigabit Ethernet Driver
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/workqueue.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>

#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>

#include "ucc_geth.h"
#include "fsl_pq_mdio.h"

#undef DEBUG

#define ugeth_printk(level, format, arg...)  \
	printk(level format "\n", ## arg)

#define ugeth_dbg(format, arg...)            \
	ugeth_printk(KERN_DEBUG , format , ## arg)
#define ugeth_err(format, arg...)            \
	ugeth_printk(KERN_ERR , format , ## arg)
#define ugeth_info(format, arg...)           \
	ugeth_printk(KERN_INFO , format , ## arg)
#define ugeth_warn(format, arg...)           \
	ugeth_printk(KERN_WARNING , format , ## arg)

#ifdef UGETH_VERBOSE_DEBUG
#define ugeth_vdbg ugeth_dbg
#else
#define ugeth_vdbg(fmt, args...) do { } while (0)
#endif				/* UGETH_VERBOSE_DEBUG */
#define UGETH_MSG_DEFAULT	(NETIF_MSG_IFUP << 1 ) - 1
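/* (NETIF_MSG_IFUP << 1) - 1 sets every bit below the one just above
 * NETIF_MSG_IFUP, i.e. all netif message classes up to and including IFUP
 * (assuming the standard netif_msg_* bit ordering). */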


static DEFINE_SPINLOCK(ugeth_lock);

static struct {
	u32 msg_enable;
} debug = { -1 };

module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)");

static struct ucc_geth_info ugeth_primary_info = {
	.uf_info = {
		    .bd_mem_part = MEM_PART_SYSTEM,
		    .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES,
		    .max_rx_buf_length = 1536,
		    /* adjusted at startup if max-speed 1000 */
		    .urfs = UCC_GETH_URFS_INIT,
		    .urfet = UCC_GETH_URFET_INIT,
		    .urfset = UCC_GETH_URFSET_INIT,
		    .utfs = UCC_GETH_UTFS_INIT,
		    .utfet = UCC_GETH_UTFET_INIT,
		    .utftt = UCC_GETH_UTFTT_INIT,
		    .ufpt = 256,
		    .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET,
		    .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
		    .tenc = UCC_FAST_TX_ENCODING_NRZ,
		    .renc = UCC_FAST_RX_ENCODING_NRZ,
		    .tcrc = UCC_FAST_16_BIT_CRC,
		    .synl = UCC_FAST_SYNC_LEN_NOT_USED,
		    },
	.numQueuesTx = 1,
	.numQueuesRx = 1,
	.extendedFilteringChainPointer = ((uint32_t) NULL),
	.typeorlen = 3072 /*1536 */ ,
	.nonBackToBackIfgPart1 = 0x40,
	.nonBackToBackIfgPart2 = 0x60,
	.miminumInterFrameGapEnforcement = 0x50,
	.backToBackInterFrameGap = 0x60,
	.mblinterval = 128,
	.nortsrbytetime = 5,
	.fracsiz = 1,
	.strictpriorityq = 0xff,
	.altBebTruncation = 0xa,
	.excessDefer = 1,
	.maxRetransmission = 0xf,
	.collisionWindow = 0x37,
	.receiveFlowControl = 1,
	.transmitFlowControl = 1,
	.maxGroupAddrInHash = 4,
	.maxIndAddrInHash = 4,
	.prel = 7,
	.maxFrameLength = 1518,
	.minFrameLength = 64,
	.maxD1Length = 1520,
	.maxD2Length = 1520,
	.vlantype = 0x8100,
	.ecamptr = ((uint32_t) NULL),
	.eventRegMask = UCCE_OTHER,
	.pausePeriod = 0xf000,
	.interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1},
	.bdRingLenTx = {
			TX_BD_RING_LEN, TX_BD_RING_LEN, TX_BD_RING_LEN,
			TX_BD_RING_LEN, TX_BD_RING_LEN, TX_BD_RING_LEN,
			TX_BD_RING_LEN, TX_BD_RING_LEN},

	.bdRingLenRx = {
			RX_BD_RING_LEN, RX_BD_RING_LEN, RX_BD_RING_LEN,
			RX_BD_RING_LEN, RX_BD_RING_LEN, RX_BD_RING_LEN,
			RX_BD_RING_LEN, RX_BD_RING_LEN},

	.numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1,
	.largestexternallookupkeysize =
	    QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE,
	.statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX |
	    UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX,
	.vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP,
	.vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP,
	.rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT,
	.aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE,
	.padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC,
	.numThreadsTx = UCC_GETH_NUM_OF_THREADS_1,
	.numThreadsRx = UCC_GETH_NUM_OF_THREADS_1,
	.riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
	.riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2,
};

static struct ucc_geth_info ugeth_info[8];

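/* ugeth_info[] above holds one ucc_geth_info per UCC (the QE provides up to
 * eight UCCs); each entry is presumably seeded from ugeth_primary_info above
 * before per-device settings are applied. */
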
#ifdef DEBUG
static void mem_disp(u8 *addr, int size)
{
	u8 *i;
	int size16Aling = (size >> 4) << 4;
	int size4Aling = (size >> 2) << 2;
	int notAlign = 0;
	if (size % 16)
		notAlign = 1;

	for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16)
		printk("0x%08x: %08x %08x %08x %08x\r\n",
		       (u32) i,
		       *((u32 *) (i)),
		       *((u32 *) (i + 4)),
		       *((u32 *) (i + 8)), *((u32 *) (i + 12)));
	if (notAlign == 1)
		printk("0x%08x: ", (u32) i);
	for (; (u32) i < (u32) addr + size4Aling; i += 4)
		printk("%08x ", *((u32 *) (i)));
	for (; (u32) i < (u32) addr + size; i++)
		printk("%02x", *((u8 *) (i)));
	if (notAlign == 1)
		printk("\r\n");
}
#endif /* DEBUG */

static struct list_head *dequeue(struct list_head *lh)
{
	unsigned long flags;

	spin_lock_irqsave(&ugeth_lock, flags);
	if (!list_empty(lh)) {
		struct list_head *node = lh->next;
		list_del(node);
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return node;
	} else {
		spin_unlock_irqrestore(&ugeth_lock, flags);
		return NULL;
	}
}

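/* dequeue() above pops the first node off a list under the driver-global
 * ugeth_lock with interrupts disabled, so it can be used from both process
 * and interrupt context; it returns NULL when the list is empty. */
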
static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
				   u8 __iomem *bd)
{
	struct sk_buff *skb = NULL;

	skb = __skb_dequeue(&ugeth->rx_recycle);
	if (!skb)
		skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length +
				    UCC_GETH_RX_DATA_BUF_ALIGNMENT);
	if (skb == NULL)
		return NULL;

	/* We need the data buffer to be aligned properly.  We will reserve
	 * as many bytes as needed to align the data properly
	 */
	skb_reserve(skb,
		    UCC_GETH_RX_DATA_BUF_ALIGNMENT -
		    (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT -
					      1)));

	skb->dev = ugeth->ndev;

	out_be32(&((struct qe_bd __iomem *)bd)->buf,
		 dma_map_single(ugeth->dev,
				skb->data,
				ugeth->ug_info->uf_info.max_rx_buf_length +
				UCC_GETH_RX_DATA_BUF_ALIGNMENT,
				DMA_FROM_DEVICE));

	out_be32((u32 __iomem *)bd,
		 (R_E | R_I | (in_be32((u32 __iomem *)bd) & R_W)));

	return skb;
}

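/* Note on get_new_skb() above: skb_reserve() aligns skb->data to
 * UCC_GETH_RX_DATA_BUF_ALIGNMENT, and the final out_be32() hands the buffer
 * to the QE by setting the Empty (R_E) and Interrupt (R_I) bits while
 * preserving only the existing Wrap (R_W) bit of the BD status word. */
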
static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
{
	u8 __iomem *bd;
	u32 bd_status;
	struct sk_buff *skb;
	int i;

	bd = ugeth->p_rx_bd_ring[rxQ];
	i = 0;

	do {
		bd_status = in_be32((u32 __iomem *)bd);
		skb = get_new_skb(ugeth, bd);

		if (!skb)	/* If can not allocate data buffer,
				   abort. Cleanup will be elsewhere */
			return -ENOMEM;

		ugeth->rx_skbuff[rxQ][i] = skb;

		/* advance the BD pointer */
		bd += sizeof(struct qe_bd);
		i++;
	} while (!(bd_status & R_W));

	return 0;
}

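/* The do/while in rx_bd_buffer_set() above walks the Rx BD ring for queue
 * rxQ and attaches a fresh skb to every descriptor; the ring length is
 * implicit, since the loop stops after the descriptor whose status has the
 * Wrap (R_W) bit set, i.e. the last BD of the ring. */
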
static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  u32 thread_alignment,
				  unsigned int risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		if ((snum = qe_get_snum()) < 0) {
			if (netif_msg_ifup(ugeth))
				ugeth_err("fill_init_enet_entries: Can not get SNUM.");
			return snum;
		}
		if ((i == 0) && skip_page_for_first_entry)
			/* First entry of Rx does not have page */
			init_enet_offset = 0;
		else {
			init_enet_offset =
			    qe_muram_alloc(thread_size, thread_alignment);
			if (IS_ERR_VALUE(init_enet_offset)) {
				if (netif_msg_ifup(ugeth))
					ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory.");
				qe_put_snum((u8) snum);
				return -ENOMEM;
			}
		}
		*(p_start++) =
		    ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset
		    | risc;
	}

	return 0;
}

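/* Each 32-bit init-enet entry written above packs three fields: the SNUM in
 * the ENET_INIT_PARAM_SNUM field, the MURAM offset of the per-thread
 * parameter page, and the requested RISC allocation bits;
 * return_init_enet_entries() below undoes exactly this encoding. */
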
static int return_init_enet_entries(struct ucc_geth_private *ugeth,
				    u32 *p_start,
				    u8 num_entries,
				    unsigned int risc,
				    int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = *p_start;

		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (val & ENET_INIT_PARAM_PTR_MASK);
				qe_muram_free(init_enet_offset);
			}
			*p_start++ = 0;
		}
	}

	return 0;
}

#ifdef DEBUG
static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
				  u32 __iomem *p_start,
				  u8 num_entries,
				  u32 thread_size,
				  unsigned int risc,
				  int skip_page_for_first_entry)
{
	u32 init_enet_offset;
	u8 i;
	int snum;

	for (i = 0; i < num_entries; i++) {
		u32 val = in_be32(p_start);

		/* Check that this entry was actually valid --
		   needed in case failed in allocations */
		if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) {
			snum =
			    (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >>
			    ENET_INIT_PARAM_SNUM_SHIFT;
			qe_put_snum((u8) snum);
			if (!((i == 0) && skip_page_for_first_entry)) {
				/* First entry of Rx does not have page */
				init_enet_offset =
				    (in_be32(p_start) &
				     ENET_INIT_PARAM_PTR_MASK);
				ugeth_info("Init enet entry %d:", i);
				ugeth_info("Base address: 0x%08x",
					   (u32)
					   qe_muram_addr(init_enet_offset));
				mem_disp(qe_muram_addr(init_enet_offset),
					 thread_size);
			}
			p_start++;
		}
	}

	return 0;
}
#endif

static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont)
{
	kfree(enet_addr_cont);
}

static void set_mac_addr(__be16 __iomem *reg, u8 *mac)
{
	out_be16(&reg[0], ((u16)mac[5] << 8) | mac[4]);
	out_be16(&reg[1], ((u16)mac[3] << 8) | mac[2]);
	out_be16(&reg[2], ((u16)mac[1] << 8) | mac[0]);
}

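/* Byte-order example for set_mac_addr() above (illustrative MAC
 * 00:04:9f:01:02:03, i.e. mac[0]=0x00 ... mac[5]=0x03): the function writes
 * reg[0]=0x0302, reg[1]=0x019f and reg[2]=0x0400, so the address ends up
 * byte-reversed in the filtering registers. */
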
static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;

	if (!(paddr_num < NUM_OF_PADDRS)) {
		ugeth_warn("%s: Illegal paddr_num.", __func__);
		return -EINVAL;
	}

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	/* Writing address ff.ff.ff.ff.ff.ff disables address
	   recognition for this register */
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff);
	out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff);

	return 0;
}

static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
				u8 *p_enet_addr)
{
	struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
	u32 cecr_subblock;

	p_82xx_addr_filt =
	    (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
	    addressfiltering;

	cecr_subblock =
	    ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

	/* Ethernet frames are defined in Little Endian mode; therefore, to
	   insert the address into the hash (Big Endian mode), we reverse
	   the bytes. */
	set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr);

	qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
		     QE_CR_PROTOCOL_ETHERNET, 0);
}

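/* hw_add_addr_in_hash() above only stages the byte-reversed address in the
 * temporary address field of the Rx parameter RAM; the QE_SET_GROUP_ADDRESS
 * host command issued afterwards is what actually folds it into the group
 * hash filter (QE firmware behaviour, inferred from the command name). */
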
static inline int compare_addr(u8 **addr1, u8 **addr2)
{
	return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
}

#ifdef DEBUG
static void get_statistics(struct ucc_geth_private *ugeth,
			   struct ucc_geth_tx_firmware_statistics *
			   tx_firmware_statistics,
			   struct ucc_geth_rx_firmware_statistics *
			   rx_firmware_statistics,
			   struct ucc_geth_hardware_statistics *hardware_statistics)
{
	struct ucc_fast __iomem *uf_regs;
	struct ucc_geth __iomem *ug_regs;
	struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram;
	struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram;

	ug_regs = ugeth->ug_regs;
	uf_regs = (struct ucc_fast __iomem *) ug_regs;
	p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
	p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;

	/* Tx firmware only if user handed pointer and driver actually
	   gathers Tx firmware statistics */
	if (tx_firmware_statistics && p_tx_fw_statistics_pram) {
		tx_firmware_statistics->sicoltx =
		    in_be32(&p_tx_fw_statistics_pram->sicoltx);
		tx_firmware_statistics->mulcoltx =
		    in_be32(&p_tx_fw_statistics_pram->mulcoltx);
		tx_firmware_statistics->latecoltxfr =
		    in_be32(&p_tx_fw_statistics_pram->latecoltxfr);
		tx_firmware_statistics->frabortduecol =
		    in_be32(&p_tx_fw_statistics_pram->frabortduecol);
		tx_firmware_statistics->frlostinmactxer =
		    in_be32(&p_tx_fw_statistics_pram->frlostinmactxer);
		tx_firmware_statistics->carriersenseertx =
		    in_be32(&p_tx_fw_statistics_pram->carriersenseertx);
		tx_firmware_statistics->frtxok =
		    in_be32(&p_tx_fw_statistics_pram->frtxok);
		tx_firmware_statistics->txfrexcessivedefer =
		    in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer);
		tx_firmware_statistics->txpkts256 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts256);
		tx_firmware_statistics->txpkts512 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts512);
		tx_firmware_statistics->txpkts1024 =
		    in_be32(&p_tx_fw_statistics_pram->txpkts1024);
		tx_firmware_statistics->txpktsjumbo =
		    in_be32(&p_tx_fw_statistics_pram->txpktsjumbo);
	}

	/* Rx firmware only if user handed pointer and driver actually
	 * gathers Rx firmware statistics */
	if (rx_firmware_statistics && p_rx_fw_statistics_pram) {
		int i;
		rx_firmware_statistics->frrxfcser =
		    in_be32(&p_rx_fw_statistics_pram->frrxfcser);
		rx_firmware_statistics->fraligner =
		    in_be32(&p_rx_fw_statistics_pram->fraligner);
		rx_firmware_statistics->inrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->inrangelenrxer);
		rx_firmware_statistics->outrangelenrxer =
		    in_be32(&p_rx_fw_statistics_pram->outrangelenrxer);
		rx_firmware_statistics->frtoolong =
		    in_be32(&p_rx_fw_statistics_pram->frtoolong);
		rx_firmware_statistics->runt =
		    in_be32(&p_rx_fw_statistics_pram->runt);
		rx_firmware_statistics->verylongevent =
		    in_be32(&p_rx_fw_statistics_pram->verylongevent);
		rx_firmware_statistics->symbolerror =
		    in_be32(&p_rx_fw_statistics_pram->symbolerror);
		rx_firmware_statistics->dropbsy =
		    in_be32(&p_rx_fw_statistics_pram->dropbsy);
		for (i = 0; i < 0x8; i++)
			rx_firmware_statistics->res0[i] =
			    p_rx_fw_statistics_pram->res0[i];
		rx_firmware_statistics->mismatchdrop =
		    in_be32(&p_rx_fw_statistics_pram->mismatchdrop);
		rx_firmware_statistics->underpkts =
		    in_be32(&p_rx_fw_statistics_pram->underpkts);
		rx_firmware_statistics->pkts256 =
		    in_be32(&p_rx_fw_statistics_pram->pkts256);
		rx_firmware_statistics->pkts512 =
		    in_be32(&p_rx_fw_statistics_pram->pkts512);
		rx_firmware_statistics->pkts1024 =
		    in_be32(&p_rx_fw_statistics_pram->pkts1024);
		rx_firmware_statistics->pktsjumbo =
		    in_be32(&p_rx_fw_statistics_pram->pktsjumbo);
		rx_firmware_statistics->frlossinmacer =
		    in_be32(&p_rx_fw_statistics_pram->frlossinmacer);
		rx_firmware_statistics->pausefr =
		    in_be32(&p_rx_fw_statistics_pram->pausefr);
		for (i = 0; i < 0x4; i++)
			rx_firmware_statistics->res1[i] =
			    p_rx_fw_statistics_pram->res1[i];
		rx_firmware_statistics->removevlan =
		    in_be32(&p_rx_fw_statistics_pram->removevlan);
		rx_firmware_statistics->replacevlan =
		    in_be32(&p_rx_fw_statistics_pram->replacevlan);
		rx_firmware_statistics->insertvlan =
		    in_be32(&p_rx_fw_statistics_pram->insertvlan);
	}

	/* Hardware only if user handed pointer and driver actually
	   gathers hardware statistics */
	if (hardware_statistics &&
	    (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) {
		hardware_statistics->tx64 = in_be32(&ug_regs->tx64);
		hardware_statistics->tx127 = in_be32(&ug_regs->tx127);
		hardware_statistics->tx255 = in_be32(&ug_regs->tx255);
		hardware_statistics->rx64 = in_be32(&ug_regs->rx64);
		hardware_statistics->rx127 = in_be32(&ug_regs->rx127);
		hardware_statistics->rx255 = in_be32(&ug_regs->rx255);
		hardware_statistics->txok = in_be32(&ug_regs->txok);
		hardware_statistics->txcf = in_be16(&ug_regs->txcf);
		hardware_statistics->tmca = in_be32(&ug_regs->tmca);
		hardware_statistics->tbca = in_be32(&ug_regs->tbca);
		hardware_statistics->rxfok = in_be32(&ug_regs->rxfok);
		hardware_statistics->rxbok = in_be32(&ug_regs->rxbok);
		hardware_statistics->rbyt = in_be32(&ug_regs->rbyt);
		hardware_statistics->rmca = in_be32(&ug_regs->rmca);
		hardware_statistics->rbca = in_be32(&ug_regs->rbca);
	}
}

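/* get_statistics() above pulls counters from up to three sources: the Tx and
 * Rx firmware statistics parameter RAM blocks (copied only when the caller
 * passed a destination and the driver allocated the corresponding PRAM), and
 * the MAC hardware counters, which are read only when UPSMR[HSE] shows that
 * hardware statistics gathering is enabled. */
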
static void dump_bds(struct ucc_geth_private *ugeth)
{
	int i;
	int length;

	for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
		if (ugeth->p_tx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenTx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("TX BDs[%d]", i);
			mem_disp(ugeth->p_tx_bd_ring[i], length);
		}
	}
	for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
		if (ugeth->p_rx_bd_ring[i]) {
			length =
			    (ugeth->ug_info->bdRingLenRx[i] *
			     sizeof(struct qe_bd));
			ugeth_info("RX BDs[%d]", i);
			mem_disp(ugeth->p_rx_bd_ring[i], length);
		}
	}
}

static void dump_regs(struct ucc_geth_private *ugeth)
{
	int i;

	ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num);
	ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs);

	ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->maccfg1, in_be32(&ugeth->ug_regs->maccfg1));
	ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->maccfg2, in_be32(&ugeth->ug_regs->maccfg2));
	ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->ipgifg, in_be32(&ugeth->ug_regs->ipgifg));
	ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->hafdup, in_be32(&ugeth->ug_regs->hafdup));
	ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->ifctl, in_be32(&ugeth->ug_regs->ifctl));
	ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->ifstat, in_be32(&ugeth->ug_regs->ifstat));
	ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->macstnaddr1, in_be32(&ugeth->ug_regs->macstnaddr1));
	ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->macstnaddr2, in_be32(&ugeth->ug_regs->macstnaddr2));
	ugeth_info("uempr : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->uempr, in_be32(&ugeth->ug_regs->uempr));
	ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->utbipar, in_be32(&ugeth->ug_regs->utbipar));
	ugeth_info("uescr : addr - 0x%08x, val - 0x%04x",
		   (u32)&ugeth->ug_regs->uescr, in_be16(&ugeth->ug_regs->uescr));
	ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->tx64, in_be32(&ugeth->ug_regs->tx64));
	ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->tx127, in_be32(&ugeth->ug_regs->tx127));
	ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->tx255, in_be32(&ugeth->ug_regs->tx255));
	ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->rx64, in_be32(&ugeth->ug_regs->rx64));
	ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->rx127, in_be32(&ugeth->ug_regs->rx127));
	ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->rx255, in_be32(&ugeth->ug_regs->rx255));
	ugeth_info("txok : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->txok, in_be32(&ugeth->ug_regs->txok));
	ugeth_info("txcf : addr - 0x%08x, val - 0x%04x",
		   (u32)&ugeth->ug_regs->txcf, in_be16(&ugeth->ug_regs->txcf));
	ugeth_info("tmca : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->tmca, in_be32(&ugeth->ug_regs->tmca));
	ugeth_info("tbca : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->tbca, in_be32(&ugeth->ug_regs->tbca));
	ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->rxfok, in_be32(&ugeth->ug_regs->rxfok));
	ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->rxbok, in_be32(&ugeth->ug_regs->rxbok));
	ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->rbyt, in_be32(&ugeth->ug_regs->rbyt));
	ugeth_info("rmca : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->rmca, in_be32(&ugeth->ug_regs->rmca));
	ugeth_info("rbca : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->rbca, in_be32(&ugeth->ug_regs->rbca));
	ugeth_info("scar : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->scar, in_be32(&ugeth->ug_regs->scar));
	ugeth_info("scam : addr - 0x%08x, val - 0x%08x",
		   (u32)&ugeth->ug_regs->scam, in_be32(&ugeth->ug_regs->scam));

	if (ugeth->p_thread_data_tx) {
		int numThreadsTxNumerical;
		switch (ugeth->ug_info->numThreadsTx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsTxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsTxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsTxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsTxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsTxNumerical = 8;
			break;
		default:
			numThreadsTxNumerical = 0;
			break;
		}

		ugeth_info("Thread data TXs:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_tx);
		for (i = 0; i < numThreadsTxNumerical; i++) {
			ugeth_info("Thread data TX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32)&ugeth->p_thread_data_tx[i]);
			mem_disp((u8 *)&ugeth->p_thread_data_tx[i],
				 sizeof(struct ucc_geth_thread_data_tx));
		}
	}
	if (ugeth->p_thread_data_rx) {
		int numThreadsRxNumerical;
		switch (ugeth->ug_info->numThreadsRx) {
		case UCC_GETH_NUM_OF_THREADS_1:
			numThreadsRxNumerical = 1;
			break;
		case UCC_GETH_NUM_OF_THREADS_2:
			numThreadsRxNumerical = 2;
			break;
		case UCC_GETH_NUM_OF_THREADS_4:
			numThreadsRxNumerical = 4;
			break;
		case UCC_GETH_NUM_OF_THREADS_6:
			numThreadsRxNumerical = 6;
			break;
		case UCC_GETH_NUM_OF_THREADS_8:
			numThreadsRxNumerical = 8;
			break;
		default:
			numThreadsRxNumerical = 0;
			break;
		}

		ugeth_info("Thread data RX:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_thread_data_rx);
		for (i = 0; i < numThreadsRxNumerical; i++) {
			ugeth_info("Thread data RX[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32)&ugeth->p_thread_data_rx[i]);
			mem_disp((u8 *)&ugeth->p_thread_data_rx[i],
				 sizeof(struct ucc_geth_thread_data_rx));
		}
	}
	if (ugeth->p_exf_glbl_param) {
		ugeth_info("EXF global param:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_exf_glbl_param);
		mem_disp((u8 *) ugeth->p_exf_glbl_param,
			 sizeof(*ugeth->p_exf_glbl_param));
	}
	if (ugeth->p_tx_glbl_pram) {
		ugeth_info("TX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram);
		ugeth_info("temoder : addr - 0x%08x, val - 0x%04x",
			   (u32)&ugeth->p_tx_glbl_pram->temoder,
			   in_be16(&ugeth->p_tx_glbl_pram->temoder));
		ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_tx_glbl_pram->sqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->sqptr));
		ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer,
			   in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer));
		ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr,
			   in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
		ugeth_info("tstate : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_tx_glbl_pram->tstate,
			   in_be32(&ugeth->p_tx_glbl_pram->tstate));
		for (i = 0; i < 8; i++)
			ugeth_info("iphoffset[%d] : addr - 0x%08x, val - 0x%02x",
				   i, (u32)&ugeth->p_tx_glbl_pram->iphoffset[i],
				   ugeth->p_tx_glbl_pram->iphoffset[i]);
		for (i = 0; i < 8; i++)
			ugeth_info("vtagtable[%d] : addr - 0x%08x, val - 0x%08x",
				   i, (u32)&ugeth->p_tx_glbl_pram->vtagtable[i],
				   in_be32(&ugeth->p_tx_glbl_pram->vtagtable[i]));
		ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_tx_glbl_pram->tqptr,
			   in_be32(&ugeth->p_tx_glbl_pram->tqptr));
	}
	if (ugeth->p_rx_glbl_pram) {
		ugeth_info("RX global param:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram);
		ugeth_info("remoder : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_rx_glbl_pram->remoder,
			   in_be32(&ugeth->p_rx_glbl_pram->remoder));
		ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_rx_glbl_pram->rqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rqptr));
		ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x",
			   (u32)&ugeth->p_rx_glbl_pram->typeorlen,
			   in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
		ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x",
			   (u32)&ugeth->p_rx_glbl_pram->rxgstpack,
			   ugeth->p_rx_glbl_pram->rxgstpack);
		ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
		ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_rx_glbl_pram->intcoalescingptr,
			   in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
		ugeth_info("rstate : addr - 0x%08x, val - 0x%02x",
			   (u32)&ugeth->p_rx_glbl_pram->rstate,
			   ugeth->p_rx_glbl_pram->rstate);
		ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x",
			   (u32)&ugeth->p_rx_glbl_pram->mrblr,
			   in_be16(&ugeth->p_rx_glbl_pram->mrblr));
		ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_rx_glbl_pram->rbdqptr,
			   in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
		ugeth_info("mflr : addr - 0x%08x, val - 0x%04x",
			   (u32)&ugeth->p_rx_glbl_pram->mflr,
			   in_be16(&ugeth->p_rx_glbl_pram->mflr));
		ugeth_info("minflr : addr - 0x%08x, val - 0x%04x",
			   (u32)&ugeth->p_rx_glbl_pram->minflr,
			   in_be16(&ugeth->p_rx_glbl_pram->minflr));
		ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x",
			   (u32)&ugeth->p_rx_glbl_pram->maxd1,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd1));
		ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x",
			   (u32)&ugeth->p_rx_glbl_pram->maxd2,
			   in_be16(&ugeth->p_rx_glbl_pram->maxd2));
		ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_rx_glbl_pram->ecamptr,
			   in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
		ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_rx_glbl_pram->l2qt,
			   in_be32(&ugeth->p_rx_glbl_pram->l2qt));
		for (i = 0; i < 8; i++)
			ugeth_info("l3qt[%d] : addr - 0x%08x, val - 0x%08x",
				   i, (u32)&ugeth->p_rx_glbl_pram->l3qt[i],
				   in_be32(&ugeth->p_rx_glbl_pram->l3qt[i]));
		ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x",
			   (u32)&ugeth->p_rx_glbl_pram->vlantype,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantype));
		ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x",
			   (u32)&ugeth->p_rx_glbl_pram->vlantci,
			   in_be16(&ugeth->p_rx_glbl_pram->vlantci));
		for (i = 0; i < 64; i++)
			ugeth_info("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x",
				   i,
				   (u32)&ugeth->p_rx_glbl_pram->addressfiltering[i],
				   ugeth->p_rx_glbl_pram->addressfiltering[i]);
		ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x",
			   (u32)&ugeth->p_rx_glbl_pram->exfGlobalParam,
			   in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
	}
	if (ugeth->p_send_q_mem_reg) {
		ugeth_info("Send Q memory registers:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_send_q_mem_reg);
		for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
			ugeth_info("SQQD[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
			mem_disp((u8 *)&ugeth->p_send_q_mem_reg->sqqd[i],
				 sizeof(struct ucc_geth_send_queue_qd));
		}
	}
	if (ugeth->p_scheduler) {
		ugeth_info("Scheduler:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler);
		mem_disp((u8 *) ugeth->p_scheduler,
			 sizeof(*ugeth->p_scheduler));
	}
	if (ugeth->p_tx_fw_statistics_pram) {
		ugeth_info("TX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_tx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
			 sizeof(*ugeth->p_tx_fw_statistics_pram));
	}
	if (ugeth->p_rx_fw_statistics_pram) {
		ugeth_info("RX FW statistics pram:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_fw_statistics_pram);
		mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
			 sizeof(*ugeth->p_rx_fw_statistics_pram));
	}
	if (ugeth->p_rx_irq_coalescing_tbl) {
		ugeth_info("RX IRQ coalescing tables:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_rx_irq_coalescing_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX IRQ coalescing table entry[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32)&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]);
			ugeth_info("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x",
				   (u32)&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].interruptcoalescingmaxvalue,
				   in_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].interruptcoalescingmaxvalue));
			ugeth_info("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x",
				   (u32)&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].interruptcoalescingcounter,
				   in_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].interruptcoalescingcounter));
		}
	}
	if (ugeth->p_rx_bd_qs_tbl) {
		ugeth_info("RX BD QS tables:");
		ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl);
		for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
			ugeth_info("RX BD QS table[%d]:", i);
			ugeth_info("Base address: 0x%08x",
				   (u32)&ugeth->p_rx_bd_qs_tbl[i]);
			ugeth_info("bdbaseptr : addr - 0x%08x, val - 0x%08x",
				   (u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
				   in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
			ugeth_info("bdptr : addr - 0x%08x, val - 0x%08x",
				   (u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr,
				   in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
			ugeth_info("externalbdbaseptr: addr - 0x%08x, val - 0x%08x",
				   (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
				   in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr));
			ugeth_info("externalbdptr : addr - 0x%08x, val - 0x%08x",
				   (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
				   in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
			ugeth_info("ucode RX Prefetched BDs:");
			ugeth_info("Base address: 0x%08x",
				   (u32) qe_muram_addr(in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)));
			mem_disp((u8 *) qe_muram_addr(in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)),
				 sizeof(struct ucc_geth_rx_prefetched_bds));
		}
	}
	if (ugeth->p_init_enet_param_shadow) {
		int size;
		ugeth_info("Init enet param shadow:");
		ugeth_info("Base address: 0x%08x",
			   (u32) ugeth->p_init_enet_param_shadow);
		mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
			 sizeof(*ugeth->p_init_enet_param_shadow));

		size = sizeof(struct ucc_geth_thread_rx_pram);
		if (ugeth->ug_info->rxExtendedFiltering) {
			size +=
			    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
			if (ugeth->ug_info->largestexternallookupkeysize ==
			    QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
				size +=
				    THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
		}

		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->txthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_TX,
				       sizeof(struct ucc_geth_thread_tx_pram),
				       ugeth->ug_info->riscTx, 0);
		dump_init_enet_entries(ugeth,
				       &(ugeth->p_init_enet_param_shadow->rxthread[0]),
				       ENET_INIT_PARAM_MAX_ENTRIES_RX, size,
				       ugeth->ug_info->riscRx, 1);
	}
}
#endif /* DEBUG */

static void init_default_reg_vals(u32 __iomem *upsmr_register,
				  u32 __iomem *maccfg1_register,
				  u32 __iomem *maccfg2_register)
{
	out_be32(upsmr_register, UCC_GETH_UPSMR_INIT);
	out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT);
	out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT);
}

static int init_half_duplex_params(int alt_beb,
				   int back_pressure_no_backoff,
				   int no_backoff,
				   int excess_defer,
				   u8 alt_beb_truncation,
				   u8 max_retransmissions,
				   u8 collision_window,
				   u32 __iomem *hafdup_register)
{
	u32 value = 0;

	if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) ||
	    (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) ||
	    (collision_window > HALFDUP_COLLISION_WINDOW_MAX))
		return -EINVAL;

	value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT);

	if (alt_beb)
		value |= HALFDUP_ALT_BEB;
	if (back_pressure_no_backoff)
		value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF;
	if (no_backoff)
		value |= HALFDUP_NO_BACKOFF;
	if (excess_defer)
		value |= HALFDUP_EXCESSIVE_DEFER;

	value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT);

	value |= collision_window;

	out_be32(hafdup_register, value);
	return 0;
}

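/* Layout of the HAFDUP value assembled above: the alternate-BEB truncation
 * and maximum-retransmission fields are shifted into place, the boolean
 * options are OR-ed in as flag bits, and the collision window lands in the
 * low bits. ugeth_primary_info above supplies altBebTruncation = 0xa,
 * maxRetransmission = 0xf and collisionWindow = 0x37 as defaults. */
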
static int init_inter_frame_gap_params(u8 non_btb_cs_ipg,
				       u8 non_btb_ipg,
				       u8 min_ifg,
				       u8 btb_ipg,
				       u32 __iomem *ipgifg_register)
{
	u32 value = 0;

	/* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back
	   IPG part 2 */
	if (non_btb_cs_ipg > non_btb_ipg)
		return -EINVAL;

	if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) ||
	    (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) ||
	    /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */
	    (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX))
		return -EINVAL;

	value |=
	    ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) &
	     IPGIFG_NBTB_CS_IPG_MASK);
	value |=
	    ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) &
	     IPGIFG_NBTB_IPG_MASK);
	value |=
	    ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) &
	     IPGIFG_MIN_IFG_MASK);
	value |= (btb_ipg & IPGIFG_BTB_IPG_MASK);

	out_be32(ipgifg_register, value);
	return 0;
}

int init_flow_control_params(u32 automatic_flow_control_mode,
			     int rx_flow_control_enable,
			     int tx_flow_control_enable,
			     u16 pause_period,
			     u16 extension_field,
			     u32 __iomem *upsmr_register,
			     u32 __iomem *uempr_register,
			     u32 __iomem *maccfg1_register)
{
	u32 value = 0;

	/* Set UEMPR register */
	value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT;
	value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT;
	out_be32(uempr_register, value);

	/* Set UPSMR register */
	setbits32(upsmr_register, automatic_flow_control_mode);

	value = in_be32(maccfg1_register);
	if (rx_flow_control_enable)
		value |= MACCFG1_FLOW_RX;
	if (tx_flow_control_enable)
		value |= MACCFG1_FLOW_TX;
	out_be32(maccfg1_register, value);

	return 0;
}

static int init_hw_statistics_gathering_mode(int enable_hardware_statistics,
					     int auto_zero_hardware_statistics,
					     u32 __iomem *upsmr_register,
					     u16 __iomem *uescr_register)
{
	u16 uescr_value = 0;

	/* Enable hardware statistics gathering if requested */
	if (enable_hardware_statistics)
		setbits32(upsmr_register, UCC_GETH_UPSMR_HSE);

	/* Clear hardware statistics counters */
	uescr_value = in_be16(uescr_register);
	uescr_value |= UESCR_CLRCNT;
	/* Automatically zero hardware statistics counters on read,
	   if requested */
	if (auto_zero_hardware_statistics)
		uescr_value |= UESCR_AUTOZ;
	out_be16(uescr_register, uescr_value);

	return 0;
}

static int init_firmware_statistics_gathering_mode(int
		enable_tx_firmware_statistics,
		int enable_rx_firmware_statistics,
		u32 __iomem *tx_rmon_base_ptr,
		u32 tx_firmware_statistics_structure_address,
		u32 __iomem *rx_rmon_base_ptr,
		u32 rx_firmware_statistics_structure_address,
		u16 __iomem *temoder_register,
		u32 __iomem *remoder_register)
{
	/* Note: this function does not check if */
	/* the parameters it receives are NULL */

	if (enable_tx_firmware_statistics) {
		out_be32(tx_rmon_base_ptr,
			 tx_firmware_statistics_structure_address);
		setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE);
	}

	if (enable_rx_firmware_statistics) {
		out_be32(rx_rmon_base_ptr,
			 rx_firmware_statistics_structure_address);
		setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE);
	}

	return 0;
}

static int init_mac_station_addr_regs(u8 address_byte_0,
				      u8 address_byte_1,
				      u8 address_byte_2,
				      u8 address_byte_3,
				      u8 address_byte_4,
				      u8 address_byte_5,
				      u32 __iomem *macstnaddr1_register,
				      u32 __iomem *macstnaddr2_register)
{
	u32 value = 0;

	/* Example: for a station address of 0x12345678ABCD, */
	/* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */

	/* MACSTNADDR1 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 5     station address byte 4     */
	/* 16                     23  24                     31  */
	/* station address byte 3     station address byte 2     */
	value |= (u32) ((address_byte_2 << 0) & 0x000000FF);
	value |= (u32) ((address_byte_3 << 8) & 0x0000FF00);
	value |= (u32) ((address_byte_4 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_5 << 24) & 0xFF000000);

	out_be32(macstnaddr1_register, value);

	/* MACSTNADDR2 Register: */

	/* 0                      7   8                      15  */
	/* station address byte 1     station address byte 0     */
	/* 16                     23  24                     31  */
	/* reserved                   reserved                   */
	value = 0;
	value |= (u32) ((address_byte_0 << 16) & 0x00FF0000);
	value |= (u32) ((address_byte_1 << 24) & 0xFF000000);

	out_be32(macstnaddr2_register, value);

	return 0;
}

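/* Worked example for the layout documented above: for the station address
 * 0x12345678ABCD (byte 0 = 0x12 ... byte 5 = 0xCD) the code writes
 * MACSTNADDR1 = 0xCDAB7856 and MACSTNADDR2 = 0x34120000. */
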
static int init_check_frame_length_mode(int length_check,
					u32 __iomem *maccfg2_register)
{
	u32 value = 0;

	value = in_be32(maccfg2_register);

	if (length_check)
		value |= MACCFG2_LC;
	else
		value &= ~MACCFG2_LC;

	out_be32(maccfg2_register, value);
	return 0;
}

6fee40e9 | 1266 | u32 __iomem *maccfg2_register) |
ce973b14 | 1267 | { |
ce973b14 LY |
1268 | if ((preamble_length < 3) || (preamble_length > 7)) |
1269 | return -EINVAL; | |
1270 | ||
3bc53427 TT |
1271 | clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, |
1272 | preamble_length << MACCFG2_PREL_SHIFT); | |
1273 | ||
ce973b14 LY |
1274 | return 0; |
1275 | } | |
1276 | ||
ce973b14 LY |
1277 | static int init_rx_parameters(int reject_broadcast, |
1278 | int receive_short_frames, | |
6fee40e9 | 1279 | int promiscuous, u32 __iomem *upsmr_register) |
ce973b14 LY |
1280 | { |
1281 | u32 value = 0; | |
1282 | ||
1283 | value = in_be32(upsmr_register); | |
1284 | ||
1285 | if (reject_broadcast) | |
3bc53427 | 1286 | value |= UCC_GETH_UPSMR_BRO; |
ce973b14 | 1287 | else |
3bc53427 | 1288 | value &= ~UCC_GETH_UPSMR_BRO; |
ce973b14 LY |
1289 | |
1290 | if (receive_short_frames) | |
3bc53427 | 1291 | value |= UCC_GETH_UPSMR_RSH; |
ce973b14 | 1292 | else |
3bc53427 | 1293 | value &= ~UCC_GETH_UPSMR_RSH; |
ce973b14 LY |
1294 | |
1295 | if (promiscuous) | |
3bc53427 | 1296 | value |= UCC_GETH_UPSMR_PRO; |
ce973b14 | 1297 | else |
3bc53427 | 1298 | value &= ~UCC_GETH_UPSMR_PRO; |
ce973b14 LY |
1299 | |
1300 | out_be32(upsmr_register, value); | |
1301 | ||
1302 | return 0; | |
1303 | } | |
1304 | ||
1305 | static int init_max_rx_buff_len(u16 max_rx_buf_len, | |
6fee40e9 | 1306 | u16 __iomem *mrblr_register) |
ce973b14 LY |
1307 | { |
1308 | /* max_rx_buf_len value must be a multiple of 128 */ | |
8e95a202 JP |
1309 | if ((max_rx_buf_len == 0) || |
1310 | (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) | |
ce973b14 LY |
1311 | return -EINVAL; |
1312 | ||
1313 | out_be16(mrblr_register, max_rx_buf_len); | |
1314 | return 0; | |
1315 | } | |
1316 | ||
1317 | static int init_min_frame_len(u16 min_frame_length, | |
6fee40e9 AF |
1318 | u16 __iomem *minflr_register, |
1319 | u16 __iomem *mrblr_register) | |
ce973b14 LY |
1320 | { |
1321 | u16 mrblr_value = 0; | |
1322 | ||
1323 | mrblr_value = in_be16(mrblr_register); | |
1324 | if (min_frame_length >= (mrblr_value - 4)) | |
1325 | return -EINVAL; | |
1326 | ||
1327 | out_be16(minflr_register, min_frame_length); | |
1328 | return 0; | |
1329 | } | |
1330 | ||
18a8e864 | 1331 | static int adjust_enet_interface(struct ucc_geth_private *ugeth) |
ce973b14 | 1332 | { |
18a8e864 | 1333 | struct ucc_geth_info *ug_info; |
6fee40e9 AF |
1334 | struct ucc_geth __iomem *ug_regs; |
1335 | struct ucc_fast __iomem *uf_regs; | |
728de4c9 KP |
1336 | int ret_val; |
1337 | u32 upsmr, maccfg2, tbiBaseAddress; | |
ce973b14 LY |
1338 | u16 value; |
1339 | ||
b39d66a8 | 1340 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 LY |
1341 | |
1342 | ug_info = ugeth->ug_info; | |
1343 | ug_regs = ugeth->ug_regs; | |
1344 | uf_regs = ugeth->uccf->uf_regs; | |
1345 | ||
ce973b14 LY |
1346 | /* Set MACCFG2 */ |
1347 | maccfg2 = in_be32(&ug_regs->maccfg2); | |
1348 | maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; | |
728de4c9 KP |
1349 | if ((ugeth->max_speed == SPEED_10) || |
1350 | (ugeth->max_speed == SPEED_100)) | |
ce973b14 | 1351 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; |
728de4c9 | 1352 | else if (ugeth->max_speed == SPEED_1000) |
ce973b14 LY |
1353 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; |
1354 | maccfg2 |= ug_info->padAndCrc; | |
1355 | out_be32(&ug_regs->maccfg2, maccfg2); | |
1356 | ||
1357 | /* Set UPSMR */ | |
1358 | upsmr = in_be32(&uf_regs->upsmr); | |
3bc53427 TT |
1359 | upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | |
1360 | UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); | |
728de4c9 KP |
1361 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || |
1362 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || | |
1363 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || | |
bd0ceaab KP |
1364 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1365 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | |
728de4c9 | 1366 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
cef309cf HS |
1367 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII) |
1368 | upsmr |= UCC_GETH_UPSMR_RPM; | |
728de4c9 KP |
1369 | switch (ugeth->max_speed) { |
1370 | case SPEED_10: | |
3bc53427 | 1371 | upsmr |= UCC_GETH_UPSMR_R10M; |
728de4c9 KP |
1372 | /* FALLTHROUGH */ |
1373 | case SPEED_100: | |
1374 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) | |
3bc53427 | 1375 | upsmr |= UCC_GETH_UPSMR_RMM; |
728de4c9 KP |
1376 | } |
1377 | } | |
1378 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || | |
1379 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | |
3bc53427 | 1380 | upsmr |= UCC_GETH_UPSMR_TBIM; |
728de4c9 | 1381 | } |
047584ce HW |
1382 | if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII) | 
1383 | upsmr |= UCC_GETH_UPSMR_SGMM; | |
1384 | ||
ce973b14 LY |
1385 | out_be32(&uf_regs->upsmr, upsmr); |
1386 | ||
ce973b14 LY |
1387 | /* Disable autonegotiation in tbi mode, because by default it |
1388 | comes up in autonegotiation mode. */ | |
1389 | /* Note that this depends on proper setting in utbipar register. */ | |
728de4c9 KP |
1390 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || |
1391 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | |
ce973b14 LY |
1392 | tbiBaseAddress = in_be32(&ug_regs->utbipar); |
1393 | tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; | |
1394 | tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; | |
728de4c9 KP |
1395 | value = ugeth->phydev->bus->read(ugeth->phydev->bus, |
1396 | (u8) tbiBaseAddress, ENET_TBI_MII_CR); | |
ce973b14 | 1397 | value &= ~0x1000; /* Turn off autonegotiation */ |
728de4c9 KP |
1398 | ugeth->phydev->bus->write(ugeth->phydev->bus, |
1399 | (u8) tbiBaseAddress, ENET_TBI_MII_CR, value); | |
ce973b14 LY |
1400 | } |
1401 | ||
1402 | init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); | |
1403 | ||
1404 | ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); | |
1405 | if (ret_val != 0) { | |
890de95e LY |
1406 | if (netif_msg_probe(ugeth)) |
1407 | ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", | |
b39d66a8 | 1408 | __func__); |
ce973b14 LY |
1409 | return ret_val; |
1410 | } | |
1411 | ||
1412 | return 0; | |
1413 | } | |
1414 | ||
7de8ee78 AV |
1415 | static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) |
1416 | { | |
1417 | struct ucc_fast_private *uccf; | |
1418 | u32 cecr_subblock; | |
1419 | u32 temp; | |
1420 | int i = 10; | |
1421 | ||
1422 | uccf = ugeth->uccf; | |
1423 | ||
1424 | /* Mask GRACEFUL STOP TX interrupt bit and clear it */ | |
1425 | clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); | |
1426 | out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ | |
1427 | ||
1428 | /* Issue host command */ | |
1429 | cecr_subblock = | |
1430 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
1431 | qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, | |
1432 | QE_CR_PROTOCOL_ETHERNET, 0); | |
1433 | ||
1434 | /* Wait for command to complete */ | |
1435 | do { | |
1436 | msleep(10); | |
1437 | temp = in_be32(uccf->p_ucce); | |
1438 | } while (!(temp & UCC_GETH_UCCE_GRA) && --i); | |
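/* Note: the wait is bounded at 10 polls (~100 ms); a timeout is not treated as an error here. */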
1439 | ||
1440 | uccf->stopped_tx = 1; | |
1441 | ||
1442 | return 0; | |
1443 | } | |
1444 | ||
1445 | static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth) | |
1446 | { | |
1447 | struct ucc_fast_private *uccf; | |
1448 | u32 cecr_subblock; | |
1449 | u8 temp; | |
1450 | int i = 10; | |
1451 | ||
1452 | uccf = ugeth->uccf; | |
1453 | ||
1454 | /* Clear acknowledge bit */ | |
1455 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); | |
1456 | temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; | |
1457 | out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); | |
1458 | ||
1459 | /* Keep issuing the command and checking the acknowledge bit | 
1460 | until it is asserted, as the spec requires */ | 
1461 | do { | |
1462 | /* Issue host command */ | |
1463 | cecr_subblock = | |
1464 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. | |
1465 | ucc_num); | |
1466 | qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, | |
1467 | QE_CR_PROTOCOL_ETHERNET, 0); | |
1468 | msleep(10); | |
1469 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); | |
1470 | } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i); | |
1471 | ||
1472 | uccf->stopped_rx = 1; | |
1473 | ||
1474 | return 0; | |
1475 | } | |
1476 | ||
1477 | static int ugeth_restart_tx(struct ucc_geth_private *ugeth) | |
1478 | { | |
1479 | struct ucc_fast_private *uccf; | |
1480 | u32 cecr_subblock; | |
1481 | ||
1482 | uccf = ugeth->uccf; | |
1483 | ||
1484 | cecr_subblock = | |
1485 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
1486 | qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); | |
1487 | uccf->stopped_tx = 0; | |
1488 | ||
1489 | return 0; | |
1490 | } | |
1491 | ||
1492 | static int ugeth_restart_rx(struct ucc_geth_private *ugeth) | |
1493 | { | |
1494 | struct ucc_fast_private *uccf; | |
1495 | u32 cecr_subblock; | |
1496 | ||
1497 | uccf = ugeth->uccf; | |
1498 | ||
1499 | cecr_subblock = | |
1500 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
1501 | qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, | |
1502 | 0); | |
1503 | uccf->stopped_rx = 0; | |
1504 | ||
1505 | return 0; | |
1506 | } | |
1507 | ||
1508 | static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) | |
1509 | { | |
1510 | struct ucc_fast_private *uccf; | |
1511 | int enabled_tx, enabled_rx; | |
1512 | ||
1513 | uccf = ugeth->uccf; | |
1514 | ||
1515 | /* check if the UCC number is in range. */ | |
1516 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | |
1517 | if (netif_msg_probe(ugeth)) | |
1518 | ugeth_err("%s: ucc_num out of range.", __func__); | |
1519 | return -EINVAL; | |
1520 | } | |
1521 | ||
1522 | enabled_tx = uccf->enabled_tx; | |
1523 | enabled_rx = uccf->enabled_rx; | |
1524 | ||
1525 | /* Get Tx and Rx going again, in case this channel was actively | |
1526 | disabled. */ | |
1527 | if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) | |
1528 | ugeth_restart_tx(ugeth); | |
1529 | if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) | |
1530 | ugeth_restart_rx(ugeth); | |
1531 | ||
1532 | ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ | |
1533 | ||
1534 | return 0; | |
1535 | ||
1536 | } | |
1537 | ||
1538 | static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode) | |
1539 | { | |
1540 | struct ucc_fast_private *uccf; | |
1541 | ||
1542 | uccf = ugeth->uccf; | |
1543 | ||
1544 | /* check if the UCC number is in range. */ | |
1545 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | |
1546 | if (netif_msg_probe(ugeth)) | |
1547 | ugeth_err("%s: ucc_num out of range.", __func__); | |
1548 | return -EINVAL; | |
1549 | } | |
1550 | ||
1551 | /* Stop any transmissions */ | |
1552 | if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) | |
1553 | ugeth_graceful_stop_tx(ugeth); | |
1554 | ||
1555 | /* Stop any receptions */ | |
1556 | if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) | |
1557 | ugeth_graceful_stop_rx(ugeth); | |
1558 | ||
1559 | ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ | |
1560 | ||
1561 | return 0; | |
1562 | } | |
1563 | ||
864fdf88 AV |
1564 | static void ugeth_quiesce(struct ucc_geth_private *ugeth) |
1565 | { | |
08b5e1c9 AV |
1566 | /* Prevent any further xmits, plus detach the device. */ |
1567 | netif_device_detach(ugeth->ndev); | |
1568 | ||
1569 | /* Wait for any current xmits to finish. */ | |
864fdf88 AV |
1570 | netif_tx_disable(ugeth->ndev); |
1571 | ||
1572 | /* Disable the interrupt to avoid NAPI rescheduling. */ | |
1573 | disable_irq(ugeth->ug_info->uf_info.irq); | |
1574 | ||
1575 | /* Stop NAPI, and possibly wait for its completion. */ | |
1576 | napi_disable(&ugeth->napi); | |
1577 | } | |
1578 | ||
1579 | static void ugeth_activate(struct ucc_geth_private *ugeth) | |
1580 | { | |
1581 | napi_enable(&ugeth->napi); | |
1582 | enable_irq(ugeth->ug_info->uf_info.irq); | |
08b5e1c9 | 1583 | netif_device_attach(ugeth->ndev); |
864fdf88 AV |
1584 | } |
1585 | ||
ce973b14 LY |
1586 | /* Called every time the controller might need to be made | 
1587 | * aware of a new link state. The PHY code conveys this | 
1588 | * information through fields in the ugeth structure; this | 
1589 | * function converts those fields into the appropriate | 
1590 | * register values, quiescing the device if needed to update them. | 
1591 | */ | |
728de4c9 | 1592 | |
ce973b14 LY |
1593 | static void adjust_link(struct net_device *dev) |
1594 | { | |
18a8e864 | 1595 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
6fee40e9 AF |
1596 | struct ucc_geth __iomem *ug_regs; |
1597 | struct ucc_fast __iomem *uf_regs; | |
728de4c9 | 1598 | struct phy_device *phydev = ugeth->phydev; |
728de4c9 | 1599 | int new_state = 0; |
ce973b14 LY |
1600 | |
1601 | ug_regs = ugeth->ug_regs; | |
728de4c9 | 1602 | uf_regs = ugeth->uccf->uf_regs; |
ce973b14 | 1603 | |
728de4c9 KP |
1604 | if (phydev->link) { |
1605 | u32 tempval = in_be32(&ug_regs->maccfg2); | |
1606 | u32 upsmr = in_be32(&uf_regs->upsmr); | |
ce973b14 LY |
1607 | /* Now we make sure that we can be in full duplex mode. |
1608 | * If not, we operate in half-duplex mode. */ | |
728de4c9 KP |
1609 | if (phydev->duplex != ugeth->oldduplex) { |
1610 | new_state = 1; | |
1611 | if (!(phydev->duplex)) | |
ce973b14 | 1612 | tempval &= ~(MACCFG2_FDX); |
728de4c9 | 1613 | else |
ce973b14 | 1614 | tempval |= MACCFG2_FDX; |
728de4c9 | 1615 | ugeth->oldduplex = phydev->duplex; |
ce973b14 LY |
1616 | } |
1617 | ||
728de4c9 KP |
1618 | if (phydev->speed != ugeth->oldspeed) { |
1619 | new_state = 1; | |
1620 | switch (phydev->speed) { | |
1621 | case SPEED_1000: | |
1622 | tempval = ((tempval & | |
1623 | ~(MACCFG2_INTERFACE_MODE_MASK)) | | |
1624 | MACCFG2_INTERFACE_MODE_BYTE); | |
a1862a53 | 1625 | break; |
728de4c9 KP |
1626 | case SPEED_100: |
1627 | case SPEED_10: | |
1628 | tempval = ((tempval & | |
1629 | ~(MACCFG2_INTERFACE_MODE_MASK)) | | |
1630 | MACCFG2_INTERFACE_MODE_NIBBLE); | |
1631 | /* if reduced mode, re-set UPSMR.R10M */ | |
1632 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || | |
1633 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || | |
1634 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || | |
bd0ceaab KP |
1635 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1636 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | |
728de4c9 KP |
1637 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1638 | if (phydev->speed == SPEED_10) | |
3bc53427 | 1639 | upsmr |= UCC_GETH_UPSMR_R10M; |
728de4c9 | 1640 | else |
3bc53427 | 1641 | upsmr &= ~UCC_GETH_UPSMR_R10M; |
728de4c9 | 1642 | } |
ce973b14 LY |
1643 | break; |
1644 | default: | |
728de4c9 KP |
1645 | if (netif_msg_link(ugeth)) |
1646 | ugeth_warn( | |
1647 | "%s: Ack! Speed (%d) is not 10/100/1000!", | |
1648 | dev->name, phydev->speed); | |
ce973b14 LY |
1649 | break; |
1650 | } | |
728de4c9 | 1651 | ugeth->oldspeed = phydev->speed; |
ce973b14 LY |
1652 | } |
1653 | ||
1654 | if (!ugeth->oldlink) { | |
728de4c9 | 1655 | new_state = 1; |
ce973b14 | 1656 | ugeth->oldlink = 1; |
ce973b14 | 1657 | } |
08fafd84 AV |
1658 | |
1659 | if (new_state) { | |
1660 | /* | |
1661 | * To change the MAC configuration we need to disable | |
1662 | * the controller. To do so, we have to either grab | |
1663 | * ugeth->lock, which is a bad idea since 'graceful | |
1664 | * stop' commands might take quite a while, or we can | |
1665 | * quiesce driver's activity. | |
1666 | */ | |
1667 | ugeth_quiesce(ugeth); | |
1668 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | |
1669 | ||
1670 | out_be32(&ug_regs->maccfg2, tempval); | |
1671 | out_be32(&uf_regs->upsmr, upsmr); | |
1672 | ||
1673 | ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | |
1674 | ugeth_activate(ugeth); | |
1675 | } | |
728de4c9 KP |
1676 | } else if (ugeth->oldlink) { |
1677 | new_state = 1; | |
ce973b14 LY |
1678 | ugeth->oldlink = 0; |
1679 | ugeth->oldspeed = 0; | |
1680 | ugeth->oldduplex = -1; | |
ce973b14 | 1681 | } |
728de4c9 KP |
1682 | |
1683 | if (new_state && netif_msg_link(ugeth)) | |
1684 | phy_print_status(phydev); | |
ce973b14 LY |
1685 | } |
1686 | ||
fb1001f3 HW |
1687 | /* Initialize TBI PHY interface for communicating with the |
1688 | * SERDES lynx PHY on the chip. We communicate with this PHY | |
1689 | * through the MDIO bus on each controller, treating it as a | |
1690 | * "normal" PHY at the address found in the UTBIPA register. We assume | |
1691 | * that the UTBIPA register is valid. Either the MDIO bus code will set | |
1692 | * it to a value that doesn't conflict with other PHYs on the bus, or the | |
1693 | * value doesn't matter, as there are no other PHYs on the bus. | |
1694 | */ | |
1695 | static void uec_configure_serdes(struct net_device *dev) | |
1696 | { | |
1697 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
1698 | struct ucc_geth_info *ug_info = ugeth->ug_info; | |
1699 | struct phy_device *tbiphy; | |
1700 | ||
1701 | if (!ug_info->tbi_node) { | |
1702 | dev_warn(&dev->dev, "SGMII mode requires that the device " | |
1703 | "tree specify a tbi-handle\n"); | |
1704 | return; | |
1705 | } | |
1706 | ||
1707 | tbiphy = of_phy_find_device(ug_info->tbi_node); | |
1708 | if (!tbiphy) { | |
1709 | dev_err(&dev->dev, "error: Could not get TBI device\n"); | |
1710 | return; | |
1711 | } | |
1712 | ||
1713 | /* | |
1714 | * If the link is already up, we must already be ok, and don't need to | |
1715 | * configure and reset the TBI<->SerDes link. Maybe U-Boot configured | |
1716 | * everything for us? Resetting it takes the link down and requires | |
1717 | * several seconds for it to come back. | |
1718 | */ | |
1719 | if (phy_read(tbiphy, ENET_TBI_MII_SR) & TBISR_LSTATUS) | |
1720 | return; | |
1721 | ||
1722 | /* Single clk mode, mii mode off (for serdes communication) */ | 
1723 | phy_write(tbiphy, ENET_TBI_MII_ANA, TBIANA_SETTINGS); | |
1724 | ||
1725 | phy_write(tbiphy, ENET_TBI_MII_TBICON, TBICON_CLK_SELECT); | |
1726 | ||
1727 | phy_write(tbiphy, ENET_TBI_MII_CR, TBICR_SETTINGS); | |
1728 | } | |
1729 | ||
ce973b14 LY |
1730 | /* Configure the PHY for dev. |
1731 | * returns 0 if success. -1 if failure | |
1732 | */ | |
1733 | static int init_phy(struct net_device *dev) | |
1734 | { | |
728de4c9 | 1735 | struct ucc_geth_private *priv = netdev_priv(dev); |
61fa9dcf | 1736 | struct ucc_geth_info *ug_info = priv->ug_info; |
728de4c9 | 1737 | struct phy_device *phydev; |
ce973b14 | 1738 | |
728de4c9 KP |
1739 | priv->oldlink = 0; |
1740 | priv->oldspeed = 0; | |
1741 | priv->oldduplex = -1; | |
ce973b14 | 1742 | |
0b9da337 GL |
1743 | phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0, |
1744 | priv->phy_interface); | |
3104a6ff AV |
1745 | if (!phydev) |
1746 | phydev = of_phy_connect_fixed_link(dev, &adjust_link, | |
1747 | priv->phy_interface); | |
0b9da337 | 1748 | if (!phydev) { |
3104a6ff | 1749 | dev_err(&dev->dev, "Could not attach to PHY\n"); |
0b9da337 | 1750 | return -ENODEV; |
ce973b14 LY |
1751 | } |
1752 | ||
047584ce HW |
1753 | if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) |
1754 | uec_configure_serdes(dev); | |
1755 | ||
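/* Note: the ADVERTISED_* bits used below share values with the corresponding SUPPORTED_* bits, so masking phydev->supported with them behaves as expected. */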
728de4c9 | 1756 | phydev->supported &= (ADVERTISED_10baseT_Half | |
ce973b14 LY |
1757 | ADVERTISED_10baseT_Full | |
1758 | ADVERTISED_100baseT_Half | | |
728de4c9 | 1759 | ADVERTISED_100baseT_Full); |
ce973b14 | 1760 | |
728de4c9 KP |
1761 | if (priv->max_speed == SPEED_1000) |
1762 | phydev->supported |= ADVERTISED_1000baseT_Full; | |
ce973b14 | 1763 | |
728de4c9 | 1764 | phydev->advertising = phydev->supported; |
68dc44af | 1765 | |
728de4c9 | 1766 | priv->phydev = phydev; |
ce973b14 LY |
1767 | |
1768 | return 0; | |
ce973b14 LY |
1769 | } |
1770 | ||
18a8e864 | 1771 | static void ugeth_dump_regs(struct ucc_geth_private *ugeth) |
ce973b14 LY |
1772 | { |
1773 | #ifdef DEBUG | |
1774 | ucc_fast_dump_regs(ugeth->uccf); | |
1775 | dump_regs(ugeth); | |
1776 | dump_bds(ugeth); | |
1777 | #endif | |
1778 | } | |
1779 | ||
18a8e864 | 1780 | static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * |
ce973b14 | 1781 | ugeth, |
18a8e864 | 1782 | enum enet_addr_type |
ce973b14 LY |
1783 | enet_addr_type) |
1784 | { | |
6fee40e9 | 1785 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
18a8e864 LY |
1786 | struct ucc_fast_private *uccf; |
1787 | enum comm_dir comm_dir; | |
ce973b14 LY |
1788 | struct list_head *p_lh; |
1789 | u16 i, num; | |
6fee40e9 AF |
1790 | u32 __iomem *addr_h; |
1791 | u32 __iomem *addr_l; | |
ce973b14 LY |
1792 | u8 *p_counter; |
1793 | ||
1794 | uccf = ugeth->uccf; | |
1795 | ||
1796 | p_82xx_addr_filt = | |
6fee40e9 AF |
1797 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) |
1798 | ugeth->p_rx_glbl_pram->addressfiltering; | |
ce973b14 LY |
1799 | |
1800 | if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { | |
1801 | addr_h = &(p_82xx_addr_filt->gaddr_h); | |
1802 | addr_l = &(p_82xx_addr_filt->gaddr_l); | |
1803 | p_lh = &ugeth->group_hash_q; | |
1804 | p_counter = &(ugeth->numGroupAddrInHash); | |
1805 | } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { | |
1806 | addr_h = &(p_82xx_addr_filt->iaddr_h); | |
1807 | addr_l = &(p_82xx_addr_filt->iaddr_l); | |
1808 | p_lh = &ugeth->ind_hash_q; | |
1809 | p_counter = &(ugeth->numIndAddrInHash); | |
1810 | } else | |
1811 | return -EINVAL; | |
1812 | ||
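/* Temporarily disable whichever directions are currently enabled while the hash registers and queued address list are cleared. */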
1813 | comm_dir = 0; | |
1814 | if (uccf->enabled_tx) | |
1815 | comm_dir |= COMM_DIR_TX; | |
1816 | if (uccf->enabled_rx) | |
1817 | comm_dir |= COMM_DIR_RX; | |
1818 | if (comm_dir) | |
1819 | ugeth_disable(ugeth, comm_dir); | |
1820 | ||
1821 | /* Clear the hash table. */ | |
1822 | out_be32(addr_h, 0x00000000); | |
1823 | out_be32(addr_l, 0x00000000); | |
1824 | ||
1825 | if (!p_lh) | |
1826 | return 0; | |
1827 | ||
1828 | num = *p_counter; | |
1829 | ||
1830 | /* Delete all remaining CQ elements */ | |
1831 | for (i = 0; i < num; i++) | |
1832 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); | |
1833 | ||
1834 | *p_counter = 0; | |
1835 | ||
1836 | if (comm_dir) | |
1837 | ugeth_enable(ugeth, comm_dir); | |
1838 | ||
1839 | return 0; | |
1840 | } | |
1841 | ||
18a8e864 | 1842 | static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, |
ce973b14 LY |
1843 | u8 paddr_num) |
1844 | { | |
1845 | ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ | |
1846 | return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ | |
1847 | } | |
1848 | ||
18a8e864 | 1849 | static void ucc_geth_memclean(struct ucc_geth_private *ugeth) |
ce973b14 LY |
1850 | { |
1851 | u16 i, j; | |
6fee40e9 | 1852 | u8 __iomem *bd; |
ce973b14 LY |
1853 | |
1854 | if (!ugeth) | |
1855 | return; | |
1856 | ||
80a9fad8 | 1857 | if (ugeth->uccf) { |
ce973b14 | 1858 | ucc_fast_free(ugeth->uccf); |
80a9fad8 AV |
1859 | ugeth->uccf = NULL; |
1860 | } | |
ce973b14 LY |
1861 | |
1862 | if (ugeth->p_thread_data_tx) { | |
1863 | qe_muram_free(ugeth->thread_dat_tx_offset); | |
1864 | ugeth->p_thread_data_tx = NULL; | |
1865 | } | |
1866 | if (ugeth->p_thread_data_rx) { | |
1867 | qe_muram_free(ugeth->thread_dat_rx_offset); | |
1868 | ugeth->p_thread_data_rx = NULL; | |
1869 | } | |
1870 | if (ugeth->p_exf_glbl_param) { | |
1871 | qe_muram_free(ugeth->exf_glbl_param_offset); | |
1872 | ugeth->p_exf_glbl_param = NULL; | |
1873 | } | |
1874 | if (ugeth->p_rx_glbl_pram) { | |
1875 | qe_muram_free(ugeth->rx_glbl_pram_offset); | |
1876 | ugeth->p_rx_glbl_pram = NULL; | |
1877 | } | |
1878 | if (ugeth->p_tx_glbl_pram) { | |
1879 | qe_muram_free(ugeth->tx_glbl_pram_offset); | |
1880 | ugeth->p_tx_glbl_pram = NULL; | |
1881 | } | |
1882 | if (ugeth->p_send_q_mem_reg) { | |
1883 | qe_muram_free(ugeth->send_q_mem_reg_offset); | |
1884 | ugeth->p_send_q_mem_reg = NULL; | |
1885 | } | |
1886 | if (ugeth->p_scheduler) { | |
1887 | qe_muram_free(ugeth->scheduler_offset); | |
1888 | ugeth->p_scheduler = NULL; | |
1889 | } | |
1890 | if (ugeth->p_tx_fw_statistics_pram) { | |
1891 | qe_muram_free(ugeth->tx_fw_statistics_pram_offset); | |
1892 | ugeth->p_tx_fw_statistics_pram = NULL; | |
1893 | } | |
1894 | if (ugeth->p_rx_fw_statistics_pram) { | |
1895 | qe_muram_free(ugeth->rx_fw_statistics_pram_offset); | |
1896 | ugeth->p_rx_fw_statistics_pram = NULL; | |
1897 | } | |
1898 | if (ugeth->p_rx_irq_coalescing_tbl) { | |
1899 | qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); | |
1900 | ugeth->p_rx_irq_coalescing_tbl = NULL; | |
1901 | } | |
1902 | if (ugeth->p_rx_bd_qs_tbl) { | |
1903 | qe_muram_free(ugeth->rx_bd_qs_tbl_offset); | |
1904 | ugeth->p_rx_bd_qs_tbl = NULL; | |
1905 | } | |
1906 | if (ugeth->p_init_enet_param_shadow) { | |
1907 | return_init_enet_entries(ugeth, | |
1908 | &(ugeth->p_init_enet_param_shadow-> | |
1909 | rxthread[0]), | |
1910 | ENET_INIT_PARAM_MAX_ENTRIES_RX, | |
1911 | ugeth->ug_info->riscRx, 1); | |
1912 | return_init_enet_entries(ugeth, | |
1913 | &(ugeth->p_init_enet_param_shadow-> | |
1914 | txthread[0]), | |
1915 | ENET_INIT_PARAM_MAX_ENTRIES_TX, | |
1916 | ugeth->ug_info->riscTx, 0); | |
1917 | kfree(ugeth->p_init_enet_param_shadow); | |
1918 | ugeth->p_init_enet_param_shadow = NULL; | |
1919 | } | |
1920 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | |
1921 | bd = ugeth->p_tx_bd_ring[i]; | |
3a8205ea NIP |
1922 | if (!bd) |
1923 | continue; | |
ce973b14 LY |
1924 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { |
1925 | if (ugeth->tx_skbuff[i][j]) { | |
da1aa63e | 1926 | dma_unmap_single(ugeth->dev, |
6fee40e9 AF |
1927 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
1928 | (in_be32((u32 __iomem *)bd) & | |
ce973b14 LY |
1929 | BD_LENGTH_MASK), |
1930 | DMA_TO_DEVICE); | |
1931 | dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); | |
1932 | ugeth->tx_skbuff[i][j] = NULL; | |
1933 | } | |
1934 | } | |
1935 | ||
1936 | kfree(ugeth->tx_skbuff[i]); | |
1937 | ||
1938 | if (ugeth->p_tx_bd_ring[i]) { | |
1939 | if (ugeth->ug_info->uf_info.bd_mem_part == | |
1940 | MEM_PART_SYSTEM) | |
1941 | kfree((void *)ugeth->tx_bd_ring_offset[i]); | |
1942 | else if (ugeth->ug_info->uf_info.bd_mem_part == | |
1943 | MEM_PART_MURAM) | |
1944 | qe_muram_free(ugeth->tx_bd_ring_offset[i]); | |
1945 | ugeth->p_tx_bd_ring[i] = NULL; | |
1946 | } | |
1947 | } | |
1948 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | |
1949 | if (ugeth->p_rx_bd_ring[i]) { | |
1950 | /* Return existing data buffers in ring */ | |
1951 | bd = ugeth->p_rx_bd_ring[i]; | |
1952 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { | |
1953 | if (ugeth->rx_skbuff[i][j]) { | |
da1aa63e | 1954 | dma_unmap_single(ugeth->dev, |
6fee40e9 | 1955 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
18a8e864 LY |
1956 | ugeth->ug_info-> |
1957 | uf_info.max_rx_buf_length + | |
1958 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | |
1959 | DMA_FROM_DEVICE); | |
1960 | dev_kfree_skb_any( | |
1961 | ugeth->rx_skbuff[i][j]); | |
ce973b14 LY |
1962 | ugeth->rx_skbuff[i][j] = NULL; |
1963 | } | |
18a8e864 | 1964 | bd += sizeof(struct qe_bd); |
ce973b14 LY |
1965 | } |
1966 | ||
1967 | kfree(ugeth->rx_skbuff[i]); | |
1968 | ||
1969 | if (ugeth->ug_info->uf_info.bd_mem_part == | |
1970 | MEM_PART_SYSTEM) | |
1971 | kfree((void *)ugeth->rx_bd_ring_offset[i]); | |
1972 | else if (ugeth->ug_info->uf_info.bd_mem_part == | |
1973 | MEM_PART_MURAM) | |
1974 | qe_muram_free(ugeth->rx_bd_ring_offset[i]); | |
1975 | ugeth->p_rx_bd_ring[i] = NULL; | |
1976 | } | |
1977 | } | |
1978 | while (!list_empty(&ugeth->group_hash_q)) | |
1979 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | |
1980 | (dequeue(&ugeth->group_hash_q))); | |
1981 | while (!list_empty(&ugeth->ind_hash_q)) | |
1982 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | |
1983 | (dequeue(&ugeth->ind_hash_q))); | |
3e73fc9a AV |
1984 | if (ugeth->ug_regs) { |
1985 | iounmap(ugeth->ug_regs); | |
1986 | ugeth->ug_regs = NULL; | |
1987 | } | |
50f238fd AV |
1988 | |
1989 | skb_queue_purge(&ugeth->rx_recycle); | |
ce973b14 LY |
1990 | } |
1991 | ||
1992 | static void ucc_geth_set_multi(struct net_device *dev) | |
1993 | { | |
18a8e864 | 1994 | struct ucc_geth_private *ugeth; |
ce973b14 | 1995 | struct dev_mc_list *dmi; |
6fee40e9 AF |
1996 | struct ucc_fast __iomem *uf_regs; |
1997 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; | |
9030b3dd | 1998 | int i; |
ce973b14 LY |
1999 | |
2000 | ugeth = netdev_priv(dev); | |
2001 | ||
2002 | uf_regs = ugeth->uccf->uf_regs; | |
2003 | ||
2004 | if (dev->flags & IFF_PROMISC) { | |
3bc53427 | 2005 | setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); |
ce973b14 | 2006 | } else { |
3bc53427 | 2007 | clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); |
ce973b14 LY |
2008 | |
2009 | p_82xx_addr_filt = | |
6fee40e9 | 2010 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> |
ce973b14 LY |
2011 | p_rx_glbl_pram->addressfiltering; |
2012 | ||
2013 | if (dev->flags & IFF_ALLMULTI) { | |
2014 | /* Catch all multicast addresses, so set the | |
2015 | * filter to all 1's. | |
2016 | */ | |
2017 | out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); | |
2018 | out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); | |
2019 | } else { | |
2020 | /* Clear filter and add the addresses in the list. | |
2021 | */ | |
2022 | out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); | |
2023 | out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); | |
2024 | ||
2025 | dmi = dev->mc_list; | |
2026 | ||
2027 | for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) { | |
2028 | ||
2029 | /* Only support group multicast for now. | |
2030 | */ | |
2031 | if (!(dmi->dmi_addr[0] & 1)) | |
2032 | continue; | |
2033 | ||
ce973b14 LY |
2034 | /* Ask CPM to run CRC and set bit in |
2035 | * filter mask. | |
2036 | */ | |
9030b3dd | 2037 | hw_add_addr_in_hash(ugeth, dmi->dmi_addr); |
ce973b14 LY |
2038 | } |
2039 | } | |
2040 | } | |
2041 | } | |
2042 | ||
18a8e864 | 2043 | static void ucc_geth_stop(struct ucc_geth_private *ugeth) |
ce973b14 | 2044 | { |
6fee40e9 | 2045 | struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; |
728de4c9 | 2046 | struct phy_device *phydev = ugeth->phydev; |
ce973b14 | 2047 | |
b39d66a8 | 2048 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 LY |
2049 | |
2050 | /* Disable the controller */ | |
2051 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | |
2052 | ||
2053 | /* Tell the kernel the link is down */ | |
728de4c9 | 2054 | phy_stop(phydev); |
ce973b14 LY |
2055 | |
2056 | /* Mask all interrupts */ | |
c6f5047b | 2057 | out_be32(ugeth->uccf->p_uccm, 0x00000000); |
ce973b14 LY |
2058 | |
2059 | /* Clear all interrupts */ | |
2060 | out_be32(ugeth->uccf->p_ucce, 0xffffffff); | |
2061 | ||
2062 | /* Disable Rx and Tx */ | |
3bc53427 | 2063 | clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
ce973b14 | 2064 | |
79675900 AV |
2065 | phy_disconnect(ugeth->phydev); |
2066 | ugeth->phydev = NULL; | |
2067 | ||
ce973b14 LY |
2068 | ucc_geth_memclean(ugeth); |
2069 | } | |
2070 | ||
728de4c9 | 2071 | static int ucc_struct_init(struct ucc_geth_private *ugeth) |
ce973b14 | 2072 | { |
18a8e864 LY |
2073 | struct ucc_geth_info *ug_info; |
2074 | struct ucc_fast_info *uf_info; | |
728de4c9 | 2075 | int i; |
ce973b14 LY |
2076 | |
2077 | ug_info = ugeth->ug_info; | |
2078 | uf_info = &ug_info->uf_info; | |
2079 | ||
2080 | if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || | |
2081 | (uf_info->bd_mem_part == MEM_PART_MURAM))) { | |
890de95e LY |
2082 | if (netif_msg_probe(ugeth)) |
2083 | ugeth_err("%s: Bad memory partition value.", | |
b39d66a8 | 2084 | __func__); |
ce973b14 LY |
2085 | return -EINVAL; |
2086 | } | |
2087 | ||
2088 | /* Rx BD lengths */ | |
2089 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
2090 | if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || | |
2091 | (ug_info->bdRingLenRx[i] % | |
2092 | UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { | |
890de95e LY |
2093 | if (netif_msg_probe(ugeth)) |
2094 | ugeth_err | |
2095 | ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", | |
b39d66a8 | 2096 | __func__); |
ce973b14 LY |
2097 | return -EINVAL; |
2098 | } | |
2099 | } | |
2100 | ||
2101 | /* Tx BD lengths */ | |
2102 | for (i = 0; i < ug_info->numQueuesTx; i++) { | |
2103 | if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { | |
890de95e LY |
2104 | if (netif_msg_probe(ugeth)) |
2105 | ugeth_err | |
2106 | ("%s: Tx BD ring length must be no smaller than 2.", | |
b39d66a8 | 2107 | __func__); |
ce973b14 LY |
2108 | return -EINVAL; |
2109 | } | |
2110 | } | |
2111 | ||
2112 | /* mrblr */ | |
2113 | if ((uf_info->max_rx_buf_length == 0) || | |
2114 | (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { | |
890de95e LY |
2115 | if (netif_msg_probe(ugeth)) |
2116 | ugeth_err | |
2117 | ("%s: max_rx_buf_length must be non-zero multiple of 128.", | |
b39d66a8 | 2118 | __func__); |
ce973b14 LY |
2119 | return -EINVAL; |
2120 | } | |
2121 | ||
2122 | /* num Tx queues */ | |
2123 | if (ug_info->numQueuesTx > NUM_TX_QUEUES) { | |
890de95e | 2124 | if (netif_msg_probe(ugeth)) |
b39d66a8 | 2125 | ugeth_err("%s: number of tx queues too large.", __func__); |
ce973b14 LY |
2126 | return -EINVAL; |
2127 | } | |
2128 | ||
2129 | /* num Rx queues */ | |
2130 | if (ug_info->numQueuesRx > NUM_RX_QUEUES) { | |
890de95e | 2131 | if (netif_msg_probe(ugeth)) |
b39d66a8 | 2132 | ugeth_err("%s: number of rx queues too large.", __func__); |
ce973b14 LY |
2133 | return -EINVAL; |
2134 | } | |
2135 | ||
2136 | /* l2qt */ | |
2137 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { | |
2138 | if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { | |
890de95e LY |
2139 | if (netif_msg_probe(ugeth)) |
2140 | ugeth_err | |
2141 | ("%s: VLAN priority table entry must not be" | |
2142 | " larger than number of Rx queues.", | |
b39d66a8 | 2143 | __func__); |
ce973b14 LY |
2144 | return -EINVAL; |
2145 | } | |
2146 | } | |
2147 | ||
2148 | /* l3qt */ | |
2149 | for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { | |
2150 | if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { | |
890de95e LY |
2151 | if (netif_msg_probe(ugeth)) |
2152 | ugeth_err | |
2153 | ("%s: IP priority table entry must not be" | |
2154 | " larger than number of Rx queues.", | |
b39d66a8 | 2155 | __func__); |
ce973b14 LY |
2156 | return -EINVAL; |
2157 | } | |
2158 | } | |
2159 | ||
2160 | if (ug_info->cam && !ug_info->ecamptr) { | |
890de95e LY |
2161 | if (netif_msg_probe(ugeth)) |
2162 | ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", | |
b39d66a8 | 2163 | __func__); |
ce973b14 LY |
2164 | return -EINVAL; |
2165 | } | |
2166 | ||
2167 | if ((ug_info->numStationAddresses != | |
8e95a202 JP |
2168 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1) && |
2169 | ug_info->rxExtendedFiltering) { | |
890de95e LY |
2170 | if (netif_msg_probe(ugeth)) |
2171 | ugeth_err("%s: Number of station addresses greater than 1 " | |
2172 | "not allowed in extended parsing mode.", | |
b39d66a8 | 2173 | __func__); |
ce973b14 LY |
2174 | return -EINVAL; |
2175 | } | |
2176 | ||
2177 | /* Generate uccm_mask: error events plus per-queue Rx frame and Tx buffer events */ | 
2178 | uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ | |
2179 | for (i = 0; i < ug_info->numQueuesRx; i++) | |
3bc53427 | 2180 | uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); |
ce973b14 LY |
2181 | |
2182 | for (i = 0; i < ug_info->numQueuesTx; i++) | |
3bc53427 | 2183 | uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); |
ce973b14 | 2184 | /* Initialize the general fast UCC block. */ |
728de4c9 | 2185 | if (ucc_fast_init(uf_info, &ugeth->uccf)) { |
890de95e | 2186 | if (netif_msg_probe(ugeth)) |
b39d66a8 | 2187 | ugeth_err("%s: Failed to init uccf.", __func__); |
ce973b14 LY |
2188 | return -ENOMEM; |
2189 | } | |
728de4c9 | 2190 | |
345f8422 HW |
2191 | /* read the number of risc engines, update the riscTx and riscRx |
2192 | * if there are 4 riscs in QE | |
2193 | */ | |
2194 | if (qe_get_num_of_risc() == 4) { | |
2195 | ug_info->riscTx = QE_RISC_ALLOCATION_FOUR_RISCS; | |
2196 | ug_info->riscRx = QE_RISC_ALLOCATION_FOUR_RISCS; | |
2197 | } | |
2198 | ||
3e73fc9a AV |
2199 | ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); |
2200 | if (!ugeth->ug_regs) { | |
2201 | if (netif_msg_probe(ugeth)) | |
2202 | ugeth_err("%s: Failed to ioremap regs.", __func__); | |
2203 | return -ENOMEM; | |
2204 | } | |
728de4c9 | 2205 | |
50f238fd AV |
2206 | skb_queue_head_init(&ugeth->rx_recycle); |
2207 | ||
728de4c9 KP |
2208 | return 0; |
2209 | } | |
2210 | ||
2211 | static int ucc_geth_startup(struct ucc_geth_private *ugeth) | |
2212 | { | |
6fee40e9 AF |
2213 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
2214 | struct ucc_geth_init_pram __iomem *p_init_enet_pram; | |
728de4c9 KP |
2215 | struct ucc_fast_private *uccf; |
2216 | struct ucc_geth_info *ug_info; | |
2217 | struct ucc_fast_info *uf_info; | |
6fee40e9 AF |
2218 | struct ucc_fast __iomem *uf_regs; |
2219 | struct ucc_geth __iomem *ug_regs; | |
728de4c9 KP |
2220 | int ret_val = -EINVAL; |
2221 | u32 remoder = UCC_GETH_REMODER_INIT; | |
3bc53427 | 2222 | u32 init_enet_pram_offset, cecr_subblock, command; |
728de4c9 KP |
2223 | u32 ifstat, i, j, size, l2qt, l3qt, length; |
2224 | u16 temoder = UCC_GETH_TEMODER_INIT; | |
2225 | u16 test; | |
2226 | u8 function_code = 0; | |
6fee40e9 AF |
2227 | u8 __iomem *bd; |
2228 | u8 __iomem *endOfRing; | |
728de4c9 KP |
2229 | u8 numThreadsRxNumerical, numThreadsTxNumerical; |
2230 | ||
b39d66a8 | 2231 | ugeth_vdbg("%s: IN", __func__); |
728de4c9 KP |
2232 | uccf = ugeth->uccf; |
2233 | ug_info = ugeth->ug_info; | |
2234 | uf_info = &ug_info->uf_info; | |
2235 | uf_regs = uccf->uf_regs; | |
2236 | ug_regs = ugeth->ug_regs; | |
ce973b14 LY |
2237 | |
2238 | switch (ug_info->numThreadsRx) { | |
2239 | case UCC_GETH_NUM_OF_THREADS_1: | |
2240 | numThreadsRxNumerical = 1; | |
2241 | break; | |
2242 | case UCC_GETH_NUM_OF_THREADS_2: | |
2243 | numThreadsRxNumerical = 2; | |
2244 | break; | |
2245 | case UCC_GETH_NUM_OF_THREADS_4: | |
2246 | numThreadsRxNumerical = 4; | |
2247 | break; | |
2248 | case UCC_GETH_NUM_OF_THREADS_6: | |
2249 | numThreadsRxNumerical = 6; | |
2250 | break; | |
2251 | case UCC_GETH_NUM_OF_THREADS_8: | |
2252 | numThreadsRxNumerical = 8; | |
2253 | break; | |
2254 | default: | |
890de95e LY |
2255 | if (netif_msg_ifup(ugeth)) |
2256 | ugeth_err("%s: Bad number of Rx threads value.", | |
b39d66a8 | 2257 | __func__); |
ce973b14 LY |
2258 | return -EINVAL; |
2259 | break; | |
2260 | } | |
2261 | ||
2262 | switch (ug_info->numThreadsTx) { | |
2263 | case UCC_GETH_NUM_OF_THREADS_1: | |
2264 | numThreadsTxNumerical = 1; | |
2265 | break; | |
2266 | case UCC_GETH_NUM_OF_THREADS_2: | |
2267 | numThreadsTxNumerical = 2; | |
2268 | break; | |
2269 | case UCC_GETH_NUM_OF_THREADS_4: | |
2270 | numThreadsTxNumerical = 4; | |
2271 | break; | |
2272 | case UCC_GETH_NUM_OF_THREADS_6: | |
2273 | numThreadsTxNumerical = 6; | |
2274 | break; | |
2275 | case UCC_GETH_NUM_OF_THREADS_8: | |
2276 | numThreadsTxNumerical = 8; | |
2277 | break; | |
2278 | default: | |
890de95e LY |
2279 | if (netif_msg_ifup(ugeth)) |
2280 | ugeth_err("%s: Bad number of Tx threads value.", | |
b39d66a8 | 2281 | __func__); |
ce973b14 LY |
2282 | return -EINVAL; |
2283 | break; | |
2284 | } | |
2285 | ||
2286 | /* Calculate rx_extended_features */ | |
2287 | ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || | |
2288 | ug_info->ipAddressAlignment || | |
2289 | (ug_info->numStationAddresses != | |
2290 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1); | |
2291 | ||
2292 | ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || | |
8e95a202 JP |
2293 | (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) || |
2294 | (ug_info->vlanOperationNonTagged != | |
2295 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); | |
ce973b14 | 2296 | |
ce973b14 LY |
2297 | init_default_reg_vals(&uf_regs->upsmr, |
2298 | &ug_regs->maccfg1, &ug_regs->maccfg2); | |
2299 | ||
2300 | /* Set UPSMR */ | |
2301 | /* For more details see the hardware spec. */ | |
2302 | init_rx_parameters(ug_info->bro, | |
2303 | ug_info->rsh, ug_info->pro, &uf_regs->upsmr); | |
2304 | ||
2305 | /* We're going to ignore other registers for now, */ | |
2306 | /* except as needed to get up and running */ | |
2307 | ||
2308 | /* Set MACCFG1 */ | |
2309 | /* For more details see the hardware spec. */ | |
2310 | init_flow_control_params(ug_info->aufc, | |
2311 | ug_info->receiveFlowControl, | |
ac421852 | 2312 | ug_info->transmitFlowControl, |
ce973b14 LY |
2313 | ug_info->pausePeriod, |
2314 | ug_info->extensionField, | |
2315 | &uf_regs->upsmr, | |
2316 | &ug_regs->uempr, &ug_regs->maccfg1); | |
2317 | ||
3bc53427 | 2318 | setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
ce973b14 LY |
2319 | |
2320 | /* Set IPGIFG */ | |
2321 | /* For more details see the hardware spec. */ | |
2322 | ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, | |
2323 | ug_info->nonBackToBackIfgPart2, | |
2324 | ug_info-> | |
2325 | miminumInterFrameGapEnforcement, | |
2326 | ug_info->backToBackInterFrameGap, | |
2327 | &ug_regs->ipgifg); | |
2328 | if (ret_val != 0) { | |
890de95e LY |
2329 | if (netif_msg_ifup(ugeth)) |
2330 | ugeth_err("%s: IPGIFG initialization parameter too large.", | |
b39d66a8 | 2331 | __func__); |
ce973b14 LY |
2332 | return ret_val; |
2333 | } | |
2334 | ||
2335 | /* Set HAFDUP */ | |
2336 | /* For more details see the hardware spec. */ | |
2337 | ret_val = init_half_duplex_params(ug_info->altBeb, | |
2338 | ug_info->backPressureNoBackoff, | |
2339 | ug_info->noBackoff, | |
2340 | ug_info->excessDefer, | |
2341 | ug_info->altBebTruncation, | |
2342 | ug_info->maxRetransmission, | |
2343 | ug_info->collisionWindow, | |
2344 | &ug_regs->hafdup); | |
2345 | if (ret_val != 0) { | |
890de95e LY |
2346 | if (netif_msg_ifup(ugeth)) |
2347 | ugeth_err("%s: Half Duplex initialization parameter too large.", | |
b39d66a8 | 2348 | __func__); |
ce973b14 LY |
2349 | return ret_val; |
2350 | } | |
2351 | ||
2352 | /* Set IFSTAT */ | |
2353 | /* For more details see the hardware spec. */ | |
2354 | /* Read only - resets upon read */ | |
2355 | ifstat = in_be32(&ug_regs->ifstat); | |
2356 | ||
2357 | /* Clear UEMPR */ | |
2358 | /* For more details see the hardware spec. */ | |
2359 | out_be32(&ug_regs->uempr, 0); | |
2360 | ||
2361 | /* Set UESCR */ | |
2362 | /* For more details see the hardware spec. */ | |
2363 | init_hw_statistics_gathering_mode((ug_info->statisticsMode & | |
2364 | UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), | |
2365 | 0, &uf_regs->upsmr, &ug_regs->uescr); | |
2366 | ||
2367 | /* Allocate Tx bds */ | |
2368 | for (j = 0; j < ug_info->numQueuesTx; j++) { | |
2369 | /* Allocate in multiple of | |
2370 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, | |
2371 | according to spec */ | |
18a8e864 | 2372 | length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) |
ce973b14 LY |
2373 | / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) |
2374 | * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | |
18a8e864 | 2375 | if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % |
ce973b14 LY |
2376 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) |
2377 | length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | |
2378 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { | |
2379 | u32 align = 4; | |
2380 | if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) | |
2381 | align = UCC_GETH_TX_BD_RING_ALIGNMENT; | |
2382 | ugeth->tx_bd_ring_offset[j] = | |
6fee40e9 | 2383 | (u32) kmalloc((u32) (length + align), GFP_KERNEL); |
04b588d7 | 2384 | |
ce973b14 LY |
2385 | if (ugeth->tx_bd_ring_offset[j] != 0) |
2386 | ugeth->p_tx_bd_ring[j] = | |
6fee40e9 | 2387 | (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + |
ce973b14 LY |
2388 | align) & ~(align - 1)); |
2389 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | |
2390 | ugeth->tx_bd_ring_offset[j] = | |
2391 | qe_muram_alloc(length, | |
2392 | UCC_GETH_TX_BD_RING_ALIGNMENT); | |
4c35630c | 2393 | if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) |
ce973b14 | 2394 | ugeth->p_tx_bd_ring[j] = |
6fee40e9 | 2395 | (u8 __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2396 | tx_bd_ring_offset[j]); |
2397 | } | |
2398 | if (!ugeth->p_tx_bd_ring[j]) { | |
890de95e LY |
2399 | if (netif_msg_ifup(ugeth)) |
2400 | ugeth_err | |
2401 | ("%s: Can not allocate memory for Tx bd rings.", | |
b39d66a8 | 2402 | __func__); |
ce973b14 LY |
2403 | return -ENOMEM; |
2404 | } | |
2405 | /* Zero unused end of bd ring, according to spec */ | |
6fee40e9 AF |
2406 | memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + |
2407 | ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, | |
18a8e864 | 2408 | length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); |
ce973b14 LY |
2409 | } |
2410 | ||
2411 | /* Allocate Rx bds */ | |
2412 | for (j = 0; j < ug_info->numQueuesRx; j++) { | |
18a8e864 | 2413 | length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); |
ce973b14 LY |
2414 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { |
2415 | u32 align = 4; | |
2416 | if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) | |
2417 | align = UCC_GETH_RX_BD_RING_ALIGNMENT; | |
2418 | ugeth->rx_bd_ring_offset[j] = | |
6fee40e9 | 2419 | (u32) kmalloc((u32) (length + align), GFP_KERNEL); |
ce973b14 LY |
2420 | if (ugeth->rx_bd_ring_offset[j] != 0) |
2421 | ugeth->p_rx_bd_ring[j] = | |
6fee40e9 | 2422 | (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + |
ce973b14 LY |
2423 | align) & ~(align - 1)); |
2424 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | |
2425 | ugeth->rx_bd_ring_offset[j] = | |
2426 | qe_muram_alloc(length, | |
2427 | UCC_GETH_RX_BD_RING_ALIGNMENT); | |
4c35630c | 2428 | if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) |
ce973b14 | 2429 | ugeth->p_rx_bd_ring[j] = |
6fee40e9 | 2430 | (u8 __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2431 | rx_bd_ring_offset[j]); |
2432 | } | |
2433 | if (!ugeth->p_rx_bd_ring[j]) { | |
890de95e LY |
2434 | if (netif_msg_ifup(ugeth)) |
2435 | ugeth_err | |
2436 | ("%s: Can not allocate memory for Rx bd rings.", | |
b39d66a8 | 2437 | __func__); |
ce973b14 LY |
2438 | return -ENOMEM; |
2439 | } | |
2440 | } | |
2441 | ||
2442 | /* Init Tx bds */ | |
2443 | for (j = 0; j < ug_info->numQueuesTx; j++) { | |
2444 | /* Setup the skbuff rings */ | |
04b588d7 AD |
2445 | ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * |
2446 | ugeth->ug_info->bdRingLenTx[j], | |
2447 | GFP_KERNEL); | |
ce973b14 LY |
2448 | |
2449 | if (ugeth->tx_skbuff[j] == NULL) { | |
890de95e LY |
2450 | if (netif_msg_ifup(ugeth)) |
2451 | ugeth_err("%s: Could not allocate tx_skbuff", | |
b39d66a8 | 2452 | __func__); |
ce973b14 LY |
2453 | return -ENOMEM; |
2454 | } | |
2455 | ||
2456 | for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) | |
2457 | ugeth->tx_skbuff[j][i] = NULL; | |
2458 | ||
2459 | ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; | |
2460 | bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; | |
2461 | for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { | |
18a8e864 | 2462 | /* clear bd buffer */ |
6fee40e9 | 2463 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); |
18a8e864 | 2464 | /* set bd status and length */ |
6fee40e9 | 2465 | out_be32((u32 __iomem *)bd, 0); |
18a8e864 | 2466 | bd += sizeof(struct qe_bd); |
ce973b14 | 2467 | } |
18a8e864 LY |
2468 | bd -= sizeof(struct qe_bd); |
2469 | /* set bd status and length */ | |
6fee40e9 | 2470 | out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ |
ce973b14 LY |
2471 | } |
2472 | ||
2473 | /* Init Rx bds */ | |
2474 | for (j = 0; j < ug_info->numQueuesRx; j++) { | |
2475 | /* Setup the skbuff rings */ | |
04b588d7 AD |
2476 | ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * |
2477 | ugeth->ug_info->bdRingLenRx[j], | |
2478 | GFP_KERNEL); | |
ce973b14 LY |
2479 | |
2480 | if (ugeth->rx_skbuff[j] == NULL) { | |
890de95e LY |
2481 | if (netif_msg_ifup(ugeth)) |
2482 | ugeth_err("%s: Could not allocate rx_skbuff", | |
b39d66a8 | 2483 | __func__); |
ce973b14 LY |
2484 | return -ENOMEM; |
2485 | } | |
2486 | ||
2487 | for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) | |
2488 | ugeth->rx_skbuff[j][i] = NULL; | |
2489 | ||
2490 | ugeth->skb_currx[j] = 0; | |
2491 | bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; | |
2492 | for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { | |
18a8e864 | 2493 | /* set bd status and length */ |
6fee40e9 | 2494 | out_be32((u32 __iomem *)bd, R_I); |
18a8e864 | 2495 | /* clear bd buffer */ |
6fee40e9 | 2496 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); |
18a8e864 | 2497 | bd += sizeof(struct qe_bd); |
ce973b14 | 2498 | } |
18a8e864 LY |
2499 | bd -= sizeof(struct qe_bd); |
2500 | /* set bd status and length */ | |
6fee40e9 | 2501 | out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ |
ce973b14 LY |
2502 | } |
2503 | ||
2504 | /* | |
2505 | * Global PRAM | |
2506 | */ | |
2507 | /* Tx global PRAM */ | |
2508 | /* Allocate global tx parameter RAM page */ | |
2509 | ugeth->tx_glbl_pram_offset = | |
18a8e864 | 2510 | qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), |
ce973b14 | 2511 | UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); |
4c35630c | 2512 | if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { |
890de95e LY |
2513 | if (netif_msg_ifup(ugeth)) |
2514 | ugeth_err | |
2515 | ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", | |
b39d66a8 | 2516 | __func__); |
ce973b14 LY |
2517 | return -ENOMEM; |
2518 | } | |
2519 | ugeth->p_tx_glbl_pram = | |
6fee40e9 | 2520 | (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2521 | tx_glbl_pram_offset); |
2522 | /* Zero out p_tx_glbl_pram */ | |
6fee40e9 | 2523 | memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); |
ce973b14 LY |
2524 | |
2525 | /* Fill global PRAM */ | |
2526 | ||
2527 | /* TQPTR */ | |
2528 | /* Size varies with number of Tx threads */ | |
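/* (An extra 32 bytes are reserved below when only a single Tx thread is used.) */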
2529 | ugeth->thread_dat_tx_offset = | |
2530 | qe_muram_alloc(numThreadsTxNumerical * | |
18a8e864 | 2531 | sizeof(struct ucc_geth_thread_data_tx) + |
ce973b14 LY |
2532 | 32 * (numThreadsTxNumerical == 1), |
2533 | UCC_GETH_THREAD_DATA_ALIGNMENT); | |
4c35630c | 2534 | if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { |
890de95e LY |
2535 | if (netif_msg_ifup(ugeth)) |
2536 | ugeth_err | |
2537 | ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", | |
b39d66a8 | 2538 | __func__); |
ce973b14 LY |
2539 | return -ENOMEM; |
2540 | } | |
2541 | ||
2542 | ugeth->p_thread_data_tx = | |
6fee40e9 | 2543 | (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2544 | thread_dat_tx_offset); |
2545 | out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); | |
2546 | ||
2547 | /* vtagtable */ | |
2548 | for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) | |
2549 | out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], | |
2550 | ug_info->vtagtable[i]); | |
2551 | ||
2552 | /* iphoffset */ | |
2553 | for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) | |
6fee40e9 AF |
2554 | out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], |
2555 | ug_info->iphoffset[i]); | |
ce973b14 LY |
2556 | |
2557 | /* SQPTR */ | |
2558 | /* Size varies with number of Tx queues */ | |
2559 | ugeth->send_q_mem_reg_offset = | |
2560 | qe_muram_alloc(ug_info->numQueuesTx * | |
18a8e864 | 2561 | sizeof(struct ucc_geth_send_queue_qd), |
ce973b14 | 2562 | UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); |
4c35630c | 2563 | if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { |
890de95e LY |
2564 | if (netif_msg_ifup(ugeth)) |
2565 | ugeth_err | |
2566 | ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", | |
b39d66a8 | 2567 | __func__); |
ce973b14 LY |
2568 | return -ENOMEM; |
2569 | } | |
2570 | ||
2571 | ugeth->p_send_q_mem_reg = | |
6fee40e9 | 2572 | (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2573 | send_q_mem_reg_offset); |
2574 | out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); | |
2575 | ||
2576 | /* Setup the table */ | |
2577 | /* Assume BD rings are already established */ | |
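/* bd_ring_base is set to each ring's physical base address; last_bd_completed_address is seeded with the address of the final BD in the ring. */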
2578 | for (i = 0; i < ug_info->numQueuesTx; i++) { | |
2579 | endOfRing = | |
2580 | ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - | |
18a8e864 | 2581 | 1) * sizeof(struct qe_bd); |
ce973b14 LY |
2582 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { |
2583 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | |
2584 | (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); | |
2585 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | |
2586 | last_bd_completed_address, | |
2587 | (u32) virt_to_phys(endOfRing)); | |
2588 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | |
2589 | MEM_PART_MURAM) { | |
2590 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | |
2591 | (u32) immrbar_virt_to_phys(ugeth-> | |
2592 | p_tx_bd_ring[i])); | |
2593 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | |
2594 | last_bd_completed_address, | |
2595 | (u32) immrbar_virt_to_phys(endOfRing)); | |
2596 | } | |
2597 | } | |
2598 | ||
2599 | /* schedulerbasepointer */ | |
2600 | ||
2601 | if (ug_info->numQueuesTx > 1) { | |
2602 | /* scheduler exists only if more than 1 tx queue */ | |
2603 | ugeth->scheduler_offset = | |
18a8e864 | 2604 | qe_muram_alloc(sizeof(struct ucc_geth_scheduler), |
ce973b14 | 2605 | UCC_GETH_SCHEDULER_ALIGNMENT); |
4c35630c | 2606 | if (IS_ERR_VALUE(ugeth->scheduler_offset)) { |
890de95e LY |
2607 | if (netif_msg_ifup(ugeth)) |
2608 | ugeth_err | |
2609 | ("%s: Can not allocate DPRAM memory for p_scheduler.", | |
b39d66a8 | 2610 | __func__); |
ce973b14 LY |
2611 | return -ENOMEM; |
2612 | } | |
2613 | ||
2614 | ugeth->p_scheduler = | |
6fee40e9 | 2615 | (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2616 | scheduler_offset); |
2617 | out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, | |
2618 | ugeth->scheduler_offset); | |
2619 | /* Zero out p_scheduler */ | |
6fee40e9 | 2620 | memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); |
ce973b14 LY |
2621 | |
2622 | /* Set values in scheduler */ | |
2623 | out_be32(&ugeth->p_scheduler->mblinterval, | |
2624 | ug_info->mblinterval); | |
2625 | out_be16(&ugeth->p_scheduler->nortsrbytetime, | |
2626 | ug_info->nortsrbytetime); | |
6fee40e9 AF |
2627 | out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); |
2628 | out_8(&ugeth->p_scheduler->strictpriorityq, | |
2629 | ug_info->strictpriorityq); | |
2630 | out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); | |
2631 | out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); | |
ce973b14 | 2632 | for (i = 0; i < NUM_TX_QUEUES; i++) |
6fee40e9 AF |
2633 | out_8(&ugeth->p_scheduler->weightfactor[i], |
2634 | ug_info->weightfactor[i]); | |
ce973b14 LY |
2635 | |
2636 | /* Set pointers to cpucount registers in scheduler */ | |
2637 | ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); | |
2638 | ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); | |
2639 | ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); | |
2640 | ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); | |
2641 | ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); | |
2642 | ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); | |
2643 | ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); | |
2644 | ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); | |
2645 | } | |
2646 | ||
2647 | /* schedulerbasepointer */ | |
2648 | /* TxRMON_PTR (statistics) */ | |
2649 | if (ug_info-> | |
2650 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { | |
2651 | ugeth->tx_fw_statistics_pram_offset = | |
2652 | qe_muram_alloc(sizeof | |
18a8e864 | 2653 | (struct ucc_geth_tx_firmware_statistics_pram), |
ce973b14 | 2654 | UCC_GETH_TX_STATISTICS_ALIGNMENT); |
4c35630c | 2655 | if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { |
890de95e LY |
2656 | if (netif_msg_ifup(ugeth)) |
2657 | ugeth_err | |
2658 | ("%s: Can not allocate DPRAM memory for" | |
2659 | " p_tx_fw_statistics_pram.", | |
b39d66a8 | 2660 | __func__); |
ce973b14 LY |
2661 | return -ENOMEM; |
2662 | } | |
2663 | ugeth->p_tx_fw_statistics_pram = | |
6fee40e9 | 2664 | (struct ucc_geth_tx_firmware_statistics_pram __iomem *) |
ce973b14 LY |
2665 | qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); |
2666 | /* Zero out p_tx_fw_statistics_pram */ | |
6fee40e9 | 2667 | memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, |
18a8e864 | 2668 | 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); |
ce973b14 LY |
2669 | } |
2670 | ||
2671 | /* temoder */ | |
2672 | /* Already has speed set */ | |
2673 | ||
2674 | if (ug_info->numQueuesTx > 1) | |
2675 | temoder |= TEMODER_SCHEDULER_ENABLE; | |
2676 | if (ug_info->ipCheckSumGenerate) | |
2677 | temoder |= TEMODER_IP_CHECKSUM_GENERATE; | |
2678 | temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); | |
2679 | out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); | |
2680 | ||
2681 | test = in_be16(&ugeth->p_tx_glbl_pram->temoder); | |
2682 | ||
2683 | /* Function code register value to be used later */ | |
6b0b594b | 2684 | function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; |
ce973b14 LY |
2685 | /* Required for QE */ |
2686 | ||
2687 | /* function code register */ | |
2688 | out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); | |
2689 | ||
2690 | /* Rx global PRAM */ | |
2691 | /* Allocate global rx parameter RAM page */ | |
2692 | ugeth->rx_glbl_pram_offset = | |
18a8e864 | 2693 | qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), |
ce973b14 | 2694 | UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); |
4c35630c | 2695 | if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { |
890de95e LY |
2696 | if (netif_msg_ifup(ugeth)) |
2697 | ugeth_err | |
2698 | ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", | |
b39d66a8 | 2699 | __func__); |
ce973b14 LY |
2700 | return -ENOMEM; |
2701 | } | |
2702 | ugeth->p_rx_glbl_pram = | |
6fee40e9 | 2703 | (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2704 | rx_glbl_pram_offset); |
2705 | /* Zero out p_rx_glbl_pram */ | |
6fee40e9 | 2706 | memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); |
ce973b14 LY |
2707 | |
2708 | /* Fill global PRAM */ | |
2709 | ||
2710 | /* RQPTR */ | |
2711 | /* Size varies with number of Rx threads */ | |
2712 | ugeth->thread_dat_rx_offset = | |
2713 | qe_muram_alloc(numThreadsRxNumerical * | |
18a8e864 | 2714 | sizeof(struct ucc_geth_thread_data_rx), |
ce973b14 | 2715 | UCC_GETH_THREAD_DATA_ALIGNMENT); |
4c35630c | 2716 | if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { |
890de95e LY |
2717 | if (netif_msg_ifup(ugeth)) |
2718 | ugeth_err | |
2719 | ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", | |
b39d66a8 | 2720 | __func__); |
ce973b14 LY |
2721 | return -ENOMEM; |
2722 | } | |
2723 | ||
2724 | ugeth->p_thread_data_rx = | |
6fee40e9 | 2725 | (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2726 | thread_dat_rx_offset); |
2727 | out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); | |
2728 | ||
2729 | /* typeorlen */ | |
2730 | out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); | |
2731 | ||
2732 | /* rxrmonbaseptr (statistics) */ | |
2733 | if (ug_info-> | |
2734 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { | |
2735 | ugeth->rx_fw_statistics_pram_offset = | |
2736 | qe_muram_alloc(sizeof | |
18a8e864 | 2737 | (struct ucc_geth_rx_firmware_statistics_pram), |
ce973b14 | 2738 | UCC_GETH_RX_STATISTICS_ALIGNMENT); |
4c35630c | 2739 | if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { |
890de95e LY |
2740 | if (netif_msg_ifup(ugeth)) |
2741 | ugeth_err | |
2742 | ("%s: Can not allocate DPRAM memory for" | |
b39d66a8 | 2743 | " p_rx_fw_statistics_pram.", __func__); |
ce973b14 LY |
2744 | return -ENOMEM; |
2745 | } | |
2746 | ugeth->p_rx_fw_statistics_pram = | |
6fee40e9 | 2747 | (struct ucc_geth_rx_firmware_statistics_pram __iomem *) |
ce973b14 LY |
2748 | qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); |
2749 | /* Zero out p_rx_fw_statistics_pram */ | |
6fee40e9 | 2750 | memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, |
18a8e864 | 2751 | sizeof(struct ucc_geth_rx_firmware_statistics_pram)); |
ce973b14 LY |
2752 | } |
2753 | ||
2754 | /* intCoalescingPtr */ | |
2755 | ||
2756 | /* Size varies with number of Rx queues */ | |
2757 | ugeth->rx_irq_coalescing_tbl_offset = | |
2758 | qe_muram_alloc(ug_info->numQueuesRx * | |
7563907e MB |
2759 | sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) |
2760 | + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); | |
4c35630c | 2761 | if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { |
890de95e LY |
2762 | if (netif_msg_ifup(ugeth)) |
2763 | ugeth_err | |
2764 | ("%s: Can not allocate DPRAM memory for" | |
b39d66a8 | 2765 | " p_rx_irq_coalescing_tbl.", __func__); |
ce973b14 LY |
2766 | return -ENOMEM; |
2767 | } | |
2768 | ||
2769 | ugeth->p_rx_irq_coalescing_tbl = | |
6fee40e9 | 2770 | (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) |
ce973b14 LY |
2771 | qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); |
2772 | out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, | |
2773 | ugeth->rx_irq_coalescing_tbl_offset); | |
2774 | ||
2775 | /* Fill interrupt coalescing table */ | |
2776 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
2777 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | |
2778 | interruptcoalescingmaxvalue, | |
2779 | ug_info->interruptcoalescingmaxvalue[i]); | |
2780 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | |
2781 | interruptcoalescingcounter, | |
2782 | ug_info->interruptcoalescingmaxvalue[i]); | |
2783 | } | |
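/*
 * Note that the loop above writes interruptcoalescingmaxvalue[i] into
 * both fields of each entry, i.e. the per-queue coalescing counter is
 * initialised to the configured threshold rather than to zero.
 */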
2784 | ||
2785 | /* MRBLR */ | |
2786 | init_max_rx_buff_len(uf_info->max_rx_buf_length, | |
2787 | &ugeth->p_rx_glbl_pram->mrblr); | |
2788 | /* MFLR */ | |
2789 | out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); | |
2790 | /* MINFLR */ | |
2791 | init_min_frame_len(ug_info->minFrameLength, | |
2792 | &ugeth->p_rx_glbl_pram->minflr, | |
2793 | &ugeth->p_rx_glbl_pram->mrblr); | |
2794 | /* MAXD1 */ | |
2795 | out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); | |
2796 | /* MAXD2 */ | |
2797 | out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); | |
2798 | ||
2799 | /* l2qt */ | |
2800 | l2qt = 0; | |
2801 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) | |
2802 | l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); | |
2803 | out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); | |
2804 | ||
2805 | /* l3qt */ | |
2806 | for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { | |
2807 | l3qt = 0; | |
2808 | for (i = 0; i < 8; i++) | |
2809 | l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); | |
18a8e864 | 2810 | out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); |
ce973b14 LY |
2811 | } |
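/*
 * l2qt and l3qt pack one 4-bit Rx queue number per priority level,
 * eight levels per 32-bit word, with priority 0 in the top nibble
 * (hence the "28 - 4 * i" shift).  A hypothetical helper equivalent to
 * the two loops above, shown only to make the packing explicit:
 */
static u32 ugeth_pack_queue_nibbles(const u8 *queue)	/* queue[0..7] */
{
	u32 word = 0;
	int i;

	for (i = 0; i < 8; i++)
		word |= (u32)queue[i] << (28 - 4 * i);
	return word;
}
/* e.g. a priority-to-queue map of {0,0,1,1,2,2,3,3} packs to 0x00112233 */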
2812 | ||
2813 | /* vlantype */ | |
2814 | out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); | |
2815 | ||
2816 | /* vlantci */ | |
2817 | out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); | |
2818 | ||
2819 | /* ecamptr */ | |
2820 | out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); | |
2821 | ||
2822 | /* RBDQPTR */ | |
2823 | /* Size varies with number of Rx queues */ | |
2824 | ugeth->rx_bd_qs_tbl_offset = | |
2825 | qe_muram_alloc(ug_info->numQueuesRx * | |
18a8e864 LY |
2826 | (sizeof(struct ucc_geth_rx_bd_queues_entry) + |
2827 | sizeof(struct ucc_geth_rx_prefetched_bds)), | |
ce973b14 | 2828 | UCC_GETH_RX_BD_QUEUES_ALIGNMENT); |
4c35630c | 2829 | if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { |
890de95e LY |
2830 | if (netif_msg_ifup(ugeth)) |
2831 | ugeth_err | |
2832 | ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", | |
b39d66a8 | 2833 | __func__); |
ce973b14 LY |
2834 | return -ENOMEM; |
2835 | } | |
2836 | ||
2837 | ugeth->p_rx_bd_qs_tbl = | |
6fee40e9 | 2838 | (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2839 | rx_bd_qs_tbl_offset); |
2840 | out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); | |
2841 | /* Zero out p_rx_bd_qs_tbl */ | |
6fee40e9 | 2842 | memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, |
ce973b14 | 2843 | 0, |
18a8e864 LY |
2844 | ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + |
2845 | sizeof(struct ucc_geth_rx_prefetched_bds))); | |
ce973b14 LY |
2846 | |
2847 | /* Setup the table */ | |
2848 | /* Assume BD rings are already established */ | |
2849 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
2850 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { | |
2851 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | |
2852 | (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); | |
2853 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | |
2854 | MEM_PART_MURAM) { | |
2855 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | |
2856 | (u32) immrbar_virt_to_phys(ugeth-> | |
2857 | p_rx_bd_ring[i])); | |
2858 | } | |
2859 | /* rest of fields handled by QE */ | |
2860 | } | |
2861 | ||
2862 | /* remoder */ | |
2863 | /* Already has speed set */ | |
2864 | ||
2865 | if (ugeth->rx_extended_features) | |
2866 | remoder |= REMODER_RX_EXTENDED_FEATURES; | |
2867 | if (ug_info->rxExtendedFiltering) | |
2868 | remoder |= REMODER_RX_EXTENDED_FILTERING; | |
2869 | if (ug_info->dynamicMaxFrameLength) | |
2870 | remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; | |
2871 | if (ug_info->dynamicMinFrameLength) | |
2872 | remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; | |
2873 | remoder |= | |
2874 | ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; | |
2875 | remoder |= | |
2876 | ug_info-> | |
2877 | vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; | |
2878 | remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; | |
2879 | remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); | |
2880 | if (ug_info->ipCheckSumCheck) | |
2881 | remoder |= REMODER_IP_CHECKSUM_CHECK; | |
2882 | if (ug_info->ipAddressAlignment) | |
2883 | remoder |= REMODER_IP_ADDRESS_ALIGNMENT; | |
2884 | out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); | |
2885 | ||
2886 | /* Note that this function must be called */ | |
2887 | /* ONLY AFTER p_tx_fw_statistics_pram */ | |
2888 | /* and p_rx_fw_statistics_pram are allocated ! */ | |
2889 | init_firmware_statistics_gathering_mode((ug_info-> | |
2890 | statisticsMode & | |
2891 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), | |
2892 | (ug_info->statisticsMode & | |
2893 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), | |
2894 | &ugeth->p_tx_glbl_pram->txrmonbaseptr, | |
2895 | ugeth->tx_fw_statistics_pram_offset, | |
2896 | &ugeth->p_rx_glbl_pram->rxrmonbaseptr, | |
2897 | ugeth->rx_fw_statistics_pram_offset, | |
2898 | &ugeth->p_tx_glbl_pram->temoder, | |
2899 | &ugeth->p_rx_glbl_pram->remoder); | |
2900 | ||
2901 | /* function code register */ | |
6fee40e9 | 2902 | out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); |
ce973b14 LY |
2903 | |
2904 | /* initialize extended filtering */ | |
2905 | if (ug_info->rxExtendedFiltering) { | |
2906 | if (!ug_info->extendedFilteringChainPointer) { | |
890de95e LY |
2907 | if (netif_msg_ifup(ugeth)) |
2908 | ugeth_err("%s: Null Extended Filtering Chain Pointer.", | |
b39d66a8 | 2909 | __func__); |
ce973b14 LY |
2910 | return -EINVAL; |
2911 | } | |
2912 | ||
2913 | /* Allocate memory for extended filtering Mode Global | |
2914 | Parameters */ | |
2915 | ugeth->exf_glbl_param_offset = | |
18a8e864 | 2916 | qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), |
ce973b14 | 2917 | UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); |
4c35630c | 2918 | if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { |
890de95e LY |
2919 | if (netif_msg_ifup(ugeth)) |
2920 | ugeth_err | |
2921 | ("%s: Can not allocate DPRAM memory for" | |
b39d66a8 | 2922 | " p_exf_glbl_param.", __func__); |
ce973b14 LY |
2923 | return -ENOMEM; |
2924 | } | |
2925 | ||
2926 | ugeth->p_exf_glbl_param = | |
6fee40e9 | 2927 | (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2928 | exf_glbl_param_offset); |
2929 | out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, | |
2930 | ugeth->exf_glbl_param_offset); | |
2931 | out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, | |
2932 | (u32) ug_info->extendedFilteringChainPointer); | |
2933 | ||
2934 | } else { /* initialize 82xx style address filtering */ | |
2935 | ||
2936 | /* Init individual address recognition registers to disabled */ | |
2937 | ||
2938 | for (j = 0; j < NUM_OF_PADDRS; j++) | |
2939 | ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); | |
2940 | ||
ce973b14 | 2941 | p_82xx_addr_filt = |
6fee40e9 | 2942 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> |
ce973b14 LY |
2943 | p_rx_glbl_pram->addressfiltering; |
2944 | ||
2945 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | |
2946 | ENET_ADDR_TYPE_GROUP); | |
2947 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | |
2948 | ENET_ADDR_TYPE_INDIVIDUAL); | |
2949 | } | |
2950 | ||
2951 | /* | |
2952 | * Initialize UCC at QE level | |
2953 | */ | |
2954 | ||
2955 | command = QE_INIT_TX_RX; | |
2956 | ||
2957 | /* Allocate shadow InitEnet command parameter structure. | |
2958 | * This is needed because after the InitEnet command is executed, | |
2959 | * the structure in DPRAM is released, because DPRAM is a premium | |
2960 | * resource. | |
2961 | * This shadow structure keeps a copy of what was done so that the | |
2962 | * allocated resources can be released when the channel is freed. | |
2963 | */ | |
2964 | if (!(ugeth->p_init_enet_param_shadow = | |
04b588d7 | 2965 | kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { |
890de95e LY |
2966 | if (netif_msg_ifup(ugeth)) |
2967 | ugeth_err | |
2968 | ("%s: Can not allocate memory for" | |
b39d66a8 | 2969 | " p_UccInitEnetParamShadows.", __func__); |
ce973b14 LY |
2970 | return -ENOMEM; |
2971 | } | |
2972 | /* Zero out *p_init_enet_param_shadow */ | |
2973 | memset((char *)ugeth->p_init_enet_param_shadow, | |
18a8e864 | 2974 | 0, sizeof(struct ucc_geth_init_pram)); |
ce973b14 LY |
2975 | |
2976 | /* Fill shadow InitEnet command parameter structure */ | |
2977 | ||
2978 | ugeth->p_init_enet_param_shadow->resinit1 = | |
2979 | ENET_INIT_PARAM_MAGIC_RES_INIT1; | |
2980 | ugeth->p_init_enet_param_shadow->resinit2 = | |
2981 | ENET_INIT_PARAM_MAGIC_RES_INIT2; | |
2982 | ugeth->p_init_enet_param_shadow->resinit3 = | |
2983 | ENET_INIT_PARAM_MAGIC_RES_INIT3; | |
2984 | ugeth->p_init_enet_param_shadow->resinit4 = | |
2985 | ENET_INIT_PARAM_MAGIC_RES_INIT4; | |
2986 | ugeth->p_init_enet_param_shadow->resinit5 = | |
2987 | ENET_INIT_PARAM_MAGIC_RES_INIT5; | |
2988 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | |
2989 | ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; | |
2990 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | |
2991 | ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; | |
2992 | ||
2993 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | |
2994 | ugeth->rx_glbl_pram_offset | ug_info->riscRx; | |
2995 | if ((ug_info->largestexternallookupkeysize != | |
8e95a202 JP |
2996 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) && |
2997 | (ug_info->largestexternallookupkeysize != | |
2998 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) && | |
2999 | (ug_info->largestexternallookupkeysize != | |
3000 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { | |
890de95e LY |
3001 | if (netif_msg_ifup(ugeth)) |
3002 | ugeth_err("%s: Invalid largest External Lookup Key Size.", | |
b39d66a8 | 3003 | __func__); |
ce973b14 LY |
3004 | return -EINVAL; |
3005 | } | |
3006 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = | |
3007 | ug_info->largestexternallookupkeysize; | |
18a8e864 | 3008 | size = sizeof(struct ucc_geth_thread_rx_pram); |
ce973b14 LY |
3009 | if (ug_info->rxExtendedFiltering) { |
3010 | size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; | |
3011 | if (ug_info->largestexternallookupkeysize == | |
3012 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) | |
3013 | size += | |
3014 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; | |
3015 | if (ug_info->largestexternallookupkeysize == | |
3016 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) | |
3017 | size += | |
3018 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; | |
3019 | } | |
3020 | ||
3021 | if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> | |
3022 | p_init_enet_param_shadow->rxthread[0]), | |
3023 | (u8) (numThreadsRxNumerical + 1) | |
3024 | /* Rx needs one extra for terminator */ | |
3025 | , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, | |
3026 | ug_info->riscRx, 1)) != 0) { | |
890de95e LY |
3027 | if (netif_msg_ifup(ugeth)) |
3028 | ugeth_err("%s: Can not fill p_init_enet_param_shadow.", | |
b39d66a8 | 3029 | __func__); |
ce973b14 LY |
3030 | return ret_val; |
3031 | } | |
3032 | ||
3033 | ugeth->p_init_enet_param_shadow->txglobal = | |
3034 | ugeth->tx_glbl_pram_offset | ug_info->riscTx; | |
3035 | if ((ret_val = | |
3036 | fill_init_enet_entries(ugeth, | |
3037 | &(ugeth->p_init_enet_param_shadow-> | |
3038 | txthread[0]), numThreadsTxNumerical, | |
18a8e864 | 3039 | sizeof(struct ucc_geth_thread_tx_pram), |
ce973b14 LY |
3040 | UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, |
3041 | ug_info->riscTx, 0)) != 0) { | |
890de95e LY |
3042 | if (netif_msg_ifup(ugeth)) |
3043 | ugeth_err("%s: Can not fill p_init_enet_param_shadow.", | |
b39d66a8 | 3044 | __func__); |
ce973b14 LY |
3045 | return ret_val; |
3046 | } | |
3047 | ||
3048 | /* Load Rx bds with buffers */ | |
3049 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
3050 | if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { | |
890de95e LY |
3051 | if (netif_msg_ifup(ugeth)) |
3052 | ugeth_err("%s: Can not fill Rx bds with buffers.", | |
b39d66a8 | 3053 | __func__); |
ce973b14 LY |
3054 | return ret_val; |
3055 | } | |
3056 | } | |
3057 | ||
3058 | /* Allocate InitEnet command parameter structure */ | |
18a8e864 | 3059 | init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); |
4c35630c | 3060 | if (IS_ERR_VALUE(init_enet_pram_offset)) { |
890de95e LY |
3061 | if (netif_msg_ifup(ugeth)) |
3062 | ugeth_err | |
3063 | ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", | |
b39d66a8 | 3064 | __func__); |
ce973b14 LY |
3065 | return -ENOMEM; |
3066 | } | |
3067 | p_init_enet_pram = | |
6fee40e9 | 3068 | (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); |
ce973b14 LY |
3069 | |
3070 | /* Copy shadow InitEnet command parameter structure into PRAM */ | |
6fee40e9 AF |
3071 | out_8(&p_init_enet_pram->resinit1, |
3072 | ugeth->p_init_enet_param_shadow->resinit1); | |
3073 | out_8(&p_init_enet_pram->resinit2, | |
3074 | ugeth->p_init_enet_param_shadow->resinit2); | |
3075 | out_8(&p_init_enet_pram->resinit3, | |
3076 | ugeth->p_init_enet_param_shadow->resinit3); | |
3077 | out_8(&p_init_enet_pram->resinit4, | |
3078 | ugeth->p_init_enet_param_shadow->resinit4); | |
ce973b14 LY |
3079 | out_be16(&p_init_enet_pram->resinit5, |
3080 | ugeth->p_init_enet_param_shadow->resinit5); | |
6fee40e9 AF |
3081 | out_8(&p_init_enet_pram->largestexternallookupkeysize, |
3082 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); | |
ce973b14 LY |
3083 | out_be32(&p_init_enet_pram->rgftgfrxglobal, |
3084 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal); | |
3085 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) | |
3086 | out_be32(&p_init_enet_pram->rxthread[i], | |
3087 | ugeth->p_init_enet_param_shadow->rxthread[i]); | |
3088 | out_be32(&p_init_enet_pram->txglobal, | |
3089 | ugeth->p_init_enet_param_shadow->txglobal); | |
3090 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) | |
3091 | out_be32(&p_init_enet_pram->txthread[i], | |
3092 | ugeth->p_init_enet_param_shadow->txthread[i]); | |
3093 | ||
3094 | /* Issue QE command */ | |
3095 | cecr_subblock = | |
3096 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
18a8e864 | 3097 | qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, |
ce973b14 LY |
3098 | init_enet_pram_offset); |
3099 | ||
3100 | /* Free InitEnet command parameter */ | |
3101 | qe_muram_free(init_enet_pram_offset); | |
3102 | ||
3103 | return 0; | |
3104 | } | |
3105 | ||
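/*
 * The tail of the function above follows the pattern described in the
 * "shadow InitEnet" comment: fill a kmalloc'ed shadow structure,
 * allocate a short-lived DPRAM copy, transfer the shadow into it with
 * out_8()/out_be16()/out_be32(), issue QE_INIT_TX_RX, then immediately
 * qe_muram_free() the DPRAM copy.  Only the shadow survives, so the
 * resources it records can be released when the channel is torn down.
 */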
ce973b14 LY |
3106 | /* This is called by the kernel when a frame is ready for transmission. */ |
3107 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | |
3108 | static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
3109 | { | |
18a8e864 | 3110 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
d5b9049d MR |
3111 | #ifdef CONFIG_UGETH_TX_ON_DEMAND |
3112 | struct ucc_fast_private *uccf; | |
3113 | #endif | |
6fee40e9 | 3114 | u8 __iomem *bd; /* BD pointer */ |
ce973b14 LY |
3115 | u32 bd_status; |
3116 | u8 txQ = 0; | |
22580f89 | 3117 | unsigned long flags; |
ce973b14 | 3118 | |
b39d66a8 | 3119 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3120 | |
22580f89 | 3121 | spin_lock_irqsave(&ugeth->lock, flags); |
ce973b14 | 3122 | |
09f75cd7 | 3123 | dev->stats.tx_bytes += skb->len; |
ce973b14 LY |
3124 | |
3125 | /* Start from the next BD that should be filled */ | |
3126 | bd = ugeth->txBd[txQ]; | |
6fee40e9 | 3127 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3128 | /* Save the skb pointer so we can free it later */ |
3129 | ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; | |
3130 | ||
3131 | /* Update the current skb pointer (wrapping if this was the last) */ | |
3132 | ugeth->skb_curtx[txQ] = | |
3133 | (ugeth->skb_curtx[txQ] + | |
3134 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | |
3135 | ||
3136 | /* set up the buffer descriptor */ | |
6fee40e9 | 3137 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
da1aa63e | 3138 | dma_map_single(ugeth->dev, skb->data, |
7f80202b | 3139 | skb->len, DMA_TO_DEVICE)); |
ce973b14 | 3140 | |
18a8e864 | 3141 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ |
ce973b14 LY |
3142 | |
3143 | bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; | |
3144 | ||
18a8e864 | 3145 | /* set bd status and length */ |
6fee40e9 | 3146 | out_be32((u32 __iomem *)bd, bd_status); |
ce973b14 LY |
3147 | |
3148 | dev->trans_start = jiffies; | |
3149 | ||
3150 | /* Move to next BD in the ring */ | |
3151 | if (!(bd_status & T_W)) | |
a394f013 | 3152 | bd += sizeof(struct qe_bd); |
ce973b14 | 3153 | else |
a394f013 | 3154 | bd = ugeth->p_tx_bd_ring[txQ]; |
ce973b14 LY |
3155 | |
3156 | /* If the next BD still needs to be cleaned up, then the bds | |
3157 | are full. We need to tell the kernel to stop sending us stuff. */ | |
3158 | if (bd == ugeth->confBd[txQ]) { | |
3159 | if (!netif_queue_stopped(dev)) | |
3160 | netif_stop_queue(dev); | |
3161 | } | |
3162 | ||
a394f013 LY |
3163 | ugeth->txBd[txQ] = bd; |
3164 | ||
ce973b14 LY |
3165 | if (ugeth->p_scheduler) { |
3166 | ugeth->cpucount[txQ]++; | |
3167 | /* Indicate to QE that there are more Tx bds ready for | |
3168 | transmission */ | |
3169 | /* This is done by writing a running counter of the bd | |
3170 | count to the scheduler PRAM. */ | |
3171 | out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); | |
3172 | } | |
3173 | ||
d5b9049d MR |
3174 | #ifdef CONFIG_UGETH_TX_ON_DEMAND |
3175 | uccf = ugeth->uccf; | |
3176 | out_be16(uccf->p_utodr, UCC_FAST_TOD); | |
3177 | #endif | |
22580f89 | 3178 | spin_unlock_irqrestore(&ugeth->lock, flags); |
ce973b14 | 3179 | |
6ed10654 | 3180 | return NETDEV_TX_OK; |
ce973b14 LY |
3181 | } |
3182 | ||
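/*
 * ucc_geth_start_xmit() above and ucc_geth_tx()/ucc_geth_rx() below
 * all walk their BD rings the same way: advance one descriptor unless
 * the Wrap bit is set, in which case return to the ring base.  A
 * hypothetical helper equivalent to that open-coded step (T_W for the
 * Tx ring, R_W for the Rx ring), shown for illustration only:
 */
static u8 __iomem *ugeth_next_bd(u8 __iomem *bd, u32 bd_status,
				 u32 wrap_bit, u8 __iomem *ring_base)
{
	if (bd_status & wrap_bit)
		return ring_base;		/* last BD in the ring: wrap around */
	return bd + sizeof(struct qe_bd);	/* otherwise step to the next BD */
}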
18a8e864 | 3183 | static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) |
ce973b14 LY |
3184 | { |
3185 | struct sk_buff *skb; | |
6fee40e9 | 3186 | u8 __iomem *bd; |
ce973b14 LY |
3187 | u16 length, howmany = 0; |
3188 | u32 bd_status; | |
3189 | u8 *bdBuffer; | |
4b8fdefa | 3190 | struct net_device *dev; |
ce973b14 | 3191 | |
b39d66a8 | 3192 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3193 | |
da1aa63e | 3194 | dev = ugeth->ndev; |
88a15f2e | 3195 | |
ce973b14 LY |
3196 | /* collect received buffers */ |
3197 | bd = ugeth->rxBd[rxQ]; | |
3198 | ||
6fee40e9 | 3199 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3200 | |
3201 | /* while there are received buffers and BD is full (~R_E) */ | |
3202 | while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { | |
6fee40e9 | 3203 | bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); |
ce973b14 LY |
3204 | length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); |
3205 | skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; | |
3206 | ||
3207 | /* determine whether buffer is first, last, first and last | |
3208 | (single buffer frame) or middle (not first and not last) */ | |
3209 | if (!skb || | |
3210 | (!(bd_status & (R_F | R_L))) || | |
3211 | (bd_status & R_ERRORS_FATAL)) { | |
890de95e LY |
3212 | if (netif_msg_rx_err(ugeth)) |
3213 | ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", | |
b39d66a8 | 3214 | __func__, __LINE__, (u32) skb); |
50f238fd AV |
3215 | if (skb) { |
3216 | skb->data = skb->head + NET_SKB_PAD; | |
3217 | __skb_queue_head(&ugeth->rx_recycle, skb); | |
3218 | } | |
ce973b14 LY |
3219 | |
3220 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; | |
09f75cd7 | 3221 | dev->stats.rx_dropped++; |
ce973b14 | 3222 | } else { |
09f75cd7 | 3223 | dev->stats.rx_packets++; |
ce973b14 LY |
3224 | howmany++; |
3225 | ||
3226 | /* Prep the skb for the packet */ | |
3227 | skb_put(skb, length); | |
3228 | ||
3229 | /* Tell the skb what kind of packet this is */ | |
da1aa63e | 3230 | skb->protocol = eth_type_trans(skb, ugeth->ndev); |
ce973b14 | 3231 | |
09f75cd7 | 3232 | dev->stats.rx_bytes += length; |
ce973b14 | 3233 | /* Send the packet up the stack */ |
ce973b14 | 3234 | netif_receive_skb(skb); |
ce973b14 LY |
3235 | } |
3236 | ||
ce973b14 LY |
3237 | skb = get_new_skb(ugeth, bd); |
3238 | if (!skb) { | |
890de95e | 3239 | if (netif_msg_rx_err(ugeth)) |
b39d66a8 | 3240 | ugeth_warn("%s: No Rx Data Buffer", __func__); |
09f75cd7 | 3241 | dev->stats.rx_dropped++; |
ce973b14 LY |
3242 | break; |
3243 | } | |
3244 | ||
3245 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; | |
3246 | ||
3247 | /* update to point at the next skb */ | |
3248 | ugeth->skb_currx[rxQ] = | |
3249 | (ugeth->skb_currx[rxQ] + | |
3250 | 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); | |
3251 | ||
3252 | if (bd_status & R_W) | |
3253 | bd = ugeth->p_rx_bd_ring[rxQ]; | |
3254 | else | |
18a8e864 | 3255 | bd += sizeof(struct qe_bd); |
ce973b14 | 3256 | |
6fee40e9 | 3257 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3258 | } |
3259 | ||
3260 | ugeth->rxBd[rxQ] = bd; | |
ce973b14 LY |
3261 | return howmany; |
3262 | } | |
3263 | ||
3264 | static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |
3265 | { | |
3266 | /* Start from the next BD that should be filled */ | |
18a8e864 | 3267 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
6fee40e9 | 3268 | u8 __iomem *bd; /* BD pointer */ |
ce973b14 LY |
3269 | u32 bd_status; |
3270 | ||
3271 | bd = ugeth->confBd[txQ]; | |
6fee40e9 | 3272 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3273 | |
3274 | /* Normal processing. */ | |
3275 | while ((bd_status & T_R) == 0) { | |
50f238fd AV |
3276 | struct sk_buff *skb; |
3277 | ||
ce973b14 LY |
3278 | /* BD contains already transmitted buffer. */ |
3279 | /* Handle the transmitted buffer and release */ | |
3280 | /* the BD to be used with the current frame */ | |
3281 | ||
7583605b | 3282 | if (bd == ugeth->txBd[txQ]) /* queue empty? */ |
ce973b14 LY |
3283 | break; |
3284 | ||
09f75cd7 | 3285 | dev->stats.tx_packets++; |
ce973b14 | 3286 | |
50f238fd AV |
3287 | skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; |
3288 | ||
3289 | if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && | |
3290 | skb_recycle_check(skb, | |
3291 | ugeth->ug_info->uf_info.max_rx_buf_length + | |
3292 | UCC_GETH_RX_DATA_BUF_ALIGNMENT)) | |
3293 | __skb_queue_head(&ugeth->rx_recycle, skb); | |
3294 | else | |
3295 | dev_kfree_skb(skb); | |
3296 | ||
ce973b14 LY |
3297 | ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; |
3298 | ugeth->skb_dirtytx[txQ] = | |
3299 | (ugeth->skb_dirtytx[txQ] + | |
3300 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | |
3301 | ||
3302 | /* We freed a buffer, so now we can restart transmission */ | |
3303 | if (netif_queue_stopped(dev)) | |
3304 | netif_wake_queue(dev); | |
3305 | ||
3306 | /* Advance the confirmation BD pointer */ | |
3307 | if (!(bd_status & T_W)) | |
a394f013 | 3308 | bd += sizeof(struct qe_bd); |
ce973b14 | 3309 | else |
a394f013 | 3310 | bd = ugeth->p_tx_bd_ring[txQ]; |
6fee40e9 | 3311 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 | 3312 | } |
a394f013 | 3313 | ugeth->confBd[txQ] = bd; |
ce973b14 LY |
3314 | return 0; |
3315 | } | |
3316 | ||
bea3348e | 3317 | static int ucc_geth_poll(struct napi_struct *napi, int budget) |
ce973b14 | 3318 | { |
bea3348e | 3319 | struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); |
702ff12c | 3320 | struct ucc_geth_info *ug_info; |
bea3348e | 3321 | int howmany, i; |
ce973b14 | 3322 | |
702ff12c MR |
3323 | ug_info = ugeth->ug_info; |
3324 | ||
0cededf3 JT |
3325 | /* Tx event processing */ |
3326 | spin_lock(&ugeth->lock); | |
3327 | for (i = 0; i < ug_info->numQueuesTx; i++) | |
3328 | ucc_geth_tx(ugeth->ndev, i); | |
3329 | spin_unlock(&ugeth->lock); | |
3330 | ||
50f238fd AV |
3331 | howmany = 0; |
3332 | for (i = 0; i < ug_info->numQueuesRx; i++) | |
3333 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); | |
3334 | ||
bea3348e | 3335 | if (howmany < budget) { |
288379f0 | 3336 | napi_complete(napi); |
0cededf3 | 3337 | setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS); |
702ff12c | 3338 | } |
ce973b14 | 3339 | |
bea3348e | 3340 | return howmany; |
ce973b14 | 3341 | } |
ce973b14 | 3342 | |
7d12e780 | 3343 | static irqreturn_t ucc_geth_irq_handler(int irq, void *info) |
ce973b14 | 3344 | { |
06efcad0 | 3345 | struct net_device *dev = info; |
18a8e864 LY |
3346 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3347 | struct ucc_fast_private *uccf; | |
3348 | struct ucc_geth_info *ug_info; | |
702ff12c MR |
3349 | register u32 ucce; |
3350 | register u32 uccm; | |
ce973b14 | 3351 | |
b39d66a8 | 3352 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3353 | |
ce973b14 LY |
3354 | uccf = ugeth->uccf; |
3355 | ug_info = ugeth->ug_info; | |
3356 | ||
702ff12c MR |
3357 | /* read and clear events */ |
3358 | ucce = (u32) in_be32(uccf->p_ucce); | |
3359 | uccm = (u32) in_be32(uccf->p_uccm); | |
3360 | ucce &= uccm; | |
3361 | out_be32(uccf->p_ucce, ucce); | |
ce973b14 | 3362 | |
702ff12c | 3363 | /* check for receive events that require processing */ |
0cededf3 | 3364 | if (ucce & (UCCE_RX_EVENTS | UCCE_TX_EVENTS)) { |
288379f0 | 3365 | if (napi_schedule_prep(&ugeth->napi)) { |
0cededf3 | 3366 | uccm &= ~(UCCE_RX_EVENTS | UCCE_TX_EVENTS); |
702ff12c | 3367 | out_be32(uccf->p_uccm, uccm); |
288379f0 | 3368 | __napi_schedule(&ugeth->napi); |
702ff12c | 3369 | } |
702ff12c | 3370 | } |
ce973b14 | 3371 | |
702ff12c MR |
3372 | /* Errors and other events */ |
3373 | if (ucce & UCCE_OTHER) { | |
3bc53427 | 3374 | if (ucce & UCC_GETH_UCCE_BSY) |
09f75cd7 | 3375 | dev->stats.rx_errors++; |
3bc53427 | 3376 | if (ucce & UCC_GETH_UCCE_TXE) |
09f75cd7 | 3377 | dev->stats.tx_errors++; |
ce973b14 | 3378 | } |
ce973b14 LY |
3379 | |
3380 | return IRQ_HANDLED; | |
3381 | } | |
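/*
 * Interrupt/NAPI handshake: the handler above acknowledges only the
 * currently unmasked events, and when an Rx/Tx event is pending it
 * clears UCCE_RX_EVENTS | UCCE_TX_EVENTS in UCCM before scheduling
 * NAPI; ucc_geth_poll() re-enables those events with
 * setbits32(uccf->p_uccm, ...) once it completes under budget, so
 * Rx/Tx work runs in softirq context between those two points.
 */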
3382 | ||
26d29ea7 AV |
3383 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3384 | /* | |
3385 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
3386 | * without having to re-enable interrupts. It's not called while | |
3387 | * the interrupt routine is executing. | |
3388 | */ | |
3389 | static void ucc_netpoll(struct net_device *dev) | |
3390 | { | |
3391 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3392 | int irq = ugeth->ug_info->uf_info.irq; | |
3393 | ||
3394 | disable_irq(irq); | |
3395 | ucc_geth_irq_handler(irq, dev); | |
3396 | enable_irq(irq); | |
3397 | } | |
3398 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | |
3399 | ||
3d6593e9 KH |
3400 | static int ucc_geth_set_mac_addr(struct net_device *dev, void *p) |
3401 | { | |
3402 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3403 | struct sockaddr *addr = p; | |
3404 | ||
3405 | if (!is_valid_ether_addr(addr->sa_data)) | |
3406 | return -EADDRNOTAVAIL; | |
3407 | ||
3408 | memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); | |
3409 | ||
3410 | /* | |
3411 | * If device is not running, we will set mac addr register | |
3412 | * when opening the device. | |
3413 | */ | |
3414 | if (!netif_running(dev)) | |
3415 | return 0; | |
3416 | ||
3417 | spin_lock_irq(&ugeth->lock); | |
3418 | init_mac_station_addr_regs(dev->dev_addr[0], | |
3419 | dev->dev_addr[1], | |
3420 | dev->dev_addr[2], | |
3421 | dev->dev_addr[3], | |
3422 | dev->dev_addr[4], | |
3423 | dev->dev_addr[5], | |
3424 | &ugeth->ug_regs->macstnaddr1, | |
3425 | &ugeth->ug_regs->macstnaddr2); | |
3426 | spin_unlock_irq(&ugeth->lock); | |
3427 | ||
3428 | return 0; | |
3429 | } | |
3430 | ||
54b15983 | 3431 | static int ucc_geth_init_mac(struct ucc_geth_private *ugeth) |
ce973b14 | 3432 | { |
54b15983 | 3433 | struct net_device *dev = ugeth->ndev; |
ce973b14 LY |
3434 | int err; |
3435 | ||
728de4c9 KP |
3436 | err = ucc_struct_init(ugeth); |
3437 | if (err) { | |
890de95e | 3438 | if (netif_msg_ifup(ugeth)) |
54b15983 AV |
3439 | ugeth_err("%s: Cannot configure internal struct, " |
3440 | "aborting.", dev->name); | |
3441 | goto err; | |
728de4c9 KP |
3442 | } |
3443 | ||
ce973b14 LY |
3444 | err = ucc_geth_startup(ugeth); |
3445 | if (err) { | |
890de95e LY |
3446 | if (netif_msg_ifup(ugeth)) |
3447 | ugeth_err("%s: Cannot configure net device, aborting.", | |
3448 | dev->name); | |
54b15983 | 3449 | goto err; |
ce973b14 LY |
3450 | } |
3451 | ||
3452 | err = adjust_enet_interface(ugeth); | |
3453 | if (err) { | |
890de95e LY |
3454 | if (netif_msg_ifup(ugeth)) |
3455 | ugeth_err("%s: Cannot configure net device, aborting.", | |
3456 | dev->name); | |
54b15983 | 3457 | goto err; |
ce973b14 LY |
3458 | } |
3459 | ||
3460 | /* Set MACSTNADDR1, MACSTNADDR2 */ | |
3461 | /* For more details see the hardware spec. */ | |
3462 | init_mac_station_addr_regs(dev->dev_addr[0], | |
3463 | dev->dev_addr[1], | |
3464 | dev->dev_addr[2], | |
3465 | dev->dev_addr[3], | |
3466 | dev->dev_addr[4], | |
3467 | dev->dev_addr[5], | |
3468 | &ugeth->ug_regs->macstnaddr1, | |
3469 | &ugeth->ug_regs->macstnaddr2); | |
3470 | ||
67c2fb8f | 3471 | err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); |
ce973b14 | 3472 | if (err) { |
890de95e | 3473 | if (netif_msg_ifup(ugeth)) |
67c2fb8f | 3474 | ugeth_err("%s: Cannot enable net device, aborting.", dev->name); |
54b15983 AV |
3475 | goto err; |
3476 | } | |
3477 | ||
3478 | return 0; | |
3479 | err: | |
3480 | ucc_geth_stop(ugeth); | |
3481 | return err; | |
3482 | } | |
3483 | ||
3484 | /* Called when something needs to use the ethernet device */ | |
3485 | /* Returns 0 for success. */ | |
3486 | static int ucc_geth_open(struct net_device *dev) | |
3487 | { | |
3488 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3489 | int err; | |
3490 | ||
3491 | ugeth_vdbg("%s: IN", __func__); | |
3492 | ||
3493 | /* Test station address */ | |
3494 | if (dev->dev_addr[0] & ENET_GROUP_ADDR) { | |
3495 | if (netif_msg_ifup(ugeth)) | |
3496 | ugeth_err("%s: Multicast address used for station " | |
3497 | "address - is this what you wanted?", | |
3498 | __func__); | |
3499 | return -EINVAL; | |
3500 | } | |
3501 | ||
3502 | err = init_phy(dev); | |
3503 | if (err) { | |
3504 | if (netif_msg_ifup(ugeth)) | |
3505 | ugeth_err("%s: Cannot initialize PHY, aborting.", | |
3506 | dev->name); | |
3507 | return err; | |
3508 | } | |
3509 | ||
3510 | err = ucc_geth_init_mac(ugeth); | |
3511 | if (err) { | |
3512 | if (netif_msg_ifup(ugeth)) | |
3513 | ugeth_err("%s: Cannot initialize MAC, aborting.", | |
3514 | dev->name); | |
3515 | goto err; | |
ce973b14 | 3516 | } |
ce973b14 | 3517 | |
67c2fb8f AV |
3518 | err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, |
3519 | 0, "UCC Geth", dev); | |
ce973b14 | 3520 | if (err) { |
890de95e | 3521 | if (netif_msg_ifup(ugeth)) |
67c2fb8f AV |
3522 | ugeth_err("%s: Cannot get IRQ for net device, aborting.", |
3523 | dev->name); | |
54b15983 | 3524 | goto err; |
ce973b14 LY |
3525 | } |
3526 | ||
54b15983 AV |
3527 | phy_start(ugeth->phydev); |
3528 | napi_enable(&ugeth->napi); | |
ce973b14 LY |
3529 | netif_start_queue(dev); |
3530 | ||
2394905f AV |
3531 | device_set_wakeup_capable(&dev->dev, |
3532 | qe_alive_during_sleep() || ugeth->phydev->irq); | |
3533 | device_set_wakeup_enable(&dev->dev, ugeth->wol_en); | |
3534 | ||
ce973b14 | 3535 | return err; |
bea3348e | 3536 | |
54b15983 | 3537 | err: |
ba574696 | 3538 | ucc_geth_stop(ugeth); |
bea3348e | 3539 | return err; |
ce973b14 LY |
3540 | } |
3541 | ||
3542 | /* Stops the kernel queue, and halts the controller */ | |
3543 | static int ucc_geth_close(struct net_device *dev) | |
3544 | { | |
18a8e864 | 3545 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
ce973b14 | 3546 | |
b39d66a8 | 3547 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3548 | |
bea3348e | 3549 | napi_disable(&ugeth->napi); |
bea3348e | 3550 | |
ce973b14 LY |
3551 | ucc_geth_stop(ugeth); |
3552 | ||
da1aa63e | 3553 | free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev); |
67c2fb8f | 3554 | |
ce973b14 LY |
3555 | netif_stop_queue(dev); |
3556 | ||
3557 | return 0; | |
3558 | } | |
3559 | ||
fdb614c2 AV |
3560 | /* Reopen device. This will reset the MAC and PHY. */ |
3561 | static void ucc_geth_timeout_work(struct work_struct *work) | |
3562 | { | |
3563 | struct ucc_geth_private *ugeth; | |
3564 | struct net_device *dev; | |
3565 | ||
3566 | ugeth = container_of(work, struct ucc_geth_private, timeout_work); | |
da1aa63e | 3567 | dev = ugeth->ndev; |
fdb614c2 AV |
3568 | |
3569 | ugeth_vdbg("%s: IN", __func__); | |
3570 | ||
3571 | dev->stats.tx_errors++; | |
3572 | ||
3573 | ugeth_dump_regs(ugeth); | |
3574 | ||
3575 | if (dev->flags & IFF_UP) { | |
3576 | /* | |
3577 | * Must reset MAC *and* PHY. This is done by reopening | |
3578 | * the device. | |
3579 | */ | |
3580 | ucc_geth_close(dev); | |
3581 | ucc_geth_open(dev); | |
3582 | } | |
3583 | ||
3584 | netif_tx_schedule_all(dev); | |
3585 | } | |
3586 | ||
3587 | /* | |
3588 | * ucc_geth_timeout gets called when a packet has not been | |
3589 | * transmitted after a set amount of time. | |
3590 | */ | |
3591 | static void ucc_geth_timeout(struct net_device *dev) | |
3592 | { | |
3593 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3594 | ||
3595 | netif_carrier_off(dev); | |
3596 | schedule_work(&ugeth->timeout_work); | |
3597 | } | |
3598 | ||
2394905f AV |
3599 | |
3600 | #ifdef CONFIG_PM | |
3601 | ||
3602 | static int ucc_geth_suspend(struct of_device *ofdev, pm_message_t state) | |
3603 | { | |
3604 | struct net_device *ndev = dev_get_drvdata(&ofdev->dev); | |
3605 | struct ucc_geth_private *ugeth = netdev_priv(ndev); | |
3606 | ||
3607 | if (!netif_running(ndev)) | |
3608 | return 0; | |
3609 | ||
3610 | napi_disable(&ugeth->napi); | |
3611 | ||
3612 | /* | |
3613 | * Disable the controller, otherwise we'll wakeup on any network | |
3614 | * activity. | |
3615 | */ | |
3616 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | |
3617 | ||
3618 | if (ugeth->wol_en & WAKE_MAGIC) { | |
3619 | setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); | |
3620 | setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); | |
3621 | ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX); | |
3622 | } else if (!(ugeth->wol_en & WAKE_PHY)) { | |
3623 | phy_stop(ugeth->phydev); | |
3624 | } | |
3625 | ||
3626 | return 0; | |
3627 | } | |
3628 | ||
3629 | static int ucc_geth_resume(struct of_device *ofdev) | |
3630 | { | |
3631 | struct net_device *ndev = dev_get_drvdata(&ofdev->dev); | |
3632 | struct ucc_geth_private *ugeth = netdev_priv(ndev); | |
3633 | int err; | |
3634 | ||
3635 | if (!netif_running(ndev)) | |
3636 | return 0; | |
3637 | ||
3638 | if (qe_alive_during_sleep()) { | |
3639 | if (ugeth->wol_en & WAKE_MAGIC) { | |
3640 | ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX); | |
3641 | clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE); | |
3642 | clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD); | |
3643 | } | |
3644 | ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); | |
3645 | } else { | |
3646 | /* | |
3647 | * Full reinitialization is required if QE shuts down | |
3648 | * during sleep. | |
3649 | */ | |
3650 | ucc_geth_memclean(ugeth); | |
3651 | ||
3652 | err = ucc_geth_init_mac(ugeth); | |
3653 | if (err) { | |
3654 | ugeth_err("%s: Cannot initialize MAC, aborting.", | |
3655 | ndev->name); | |
3656 | return err; | |
3657 | } | |
3658 | } | |
3659 | ||
3660 | ugeth->oldlink = 0; | |
3661 | ugeth->oldspeed = 0; | |
3662 | ugeth->oldduplex = -1; | |
3663 | ||
3664 | phy_stop(ugeth->phydev); | |
3665 | phy_start(ugeth->phydev); | |
3666 | ||
3667 | napi_enable(&ugeth->napi); | |
3668 | netif_start_queue(ndev); | |
3669 | ||
3670 | return 0; | |
3671 | } | |
3672 | ||
3673 | #else | |
3674 | #define ucc_geth_suspend NULL | |
3675 | #define ucc_geth_resume NULL | |
3676 | #endif | |
3677 | ||
4e19b5c1 | 3678 | static phy_interface_t to_phy_interface(const char *phy_connection_type) |
728de4c9 | 3679 | { |
4e19b5c1 | 3680 | if (strcasecmp(phy_connection_type, "mii") == 0) |
728de4c9 | 3681 | return PHY_INTERFACE_MODE_MII; |
4e19b5c1 | 3682 | if (strcasecmp(phy_connection_type, "gmii") == 0) |
728de4c9 | 3683 | return PHY_INTERFACE_MODE_GMII; |
4e19b5c1 | 3684 | if (strcasecmp(phy_connection_type, "tbi") == 0) |
728de4c9 | 3685 | return PHY_INTERFACE_MODE_TBI; |
4e19b5c1 | 3686 | if (strcasecmp(phy_connection_type, "rmii") == 0) |
728de4c9 | 3687 | return PHY_INTERFACE_MODE_RMII; |
4e19b5c1 | 3688 | if (strcasecmp(phy_connection_type, "rgmii") == 0) |
728de4c9 | 3689 | return PHY_INTERFACE_MODE_RGMII; |
4e19b5c1 | 3690 | if (strcasecmp(phy_connection_type, "rgmii-id") == 0) |
728de4c9 | 3691 | return PHY_INTERFACE_MODE_RGMII_ID; |
bd0ceaab KP |
3692 | if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) |
3693 | return PHY_INTERFACE_MODE_RGMII_TXID; | |
3694 | if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) | |
3695 | return PHY_INTERFACE_MODE_RGMII_RXID; | |
4e19b5c1 | 3696 | if (strcasecmp(phy_connection_type, "rtbi") == 0) |
728de4c9 | 3697 | return PHY_INTERFACE_MODE_RTBI; |
047584ce HW |
3698 | if (strcasecmp(phy_connection_type, "sgmii") == 0) |
3699 | return PHY_INTERFACE_MODE_SGMII; | |
728de4c9 KP |
3700 | |
3701 | return PHY_INTERFACE_MODE_MII; | |
3702 | } | |
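/*
 * The strings compared above come from the device tree
 * "phy-connection-type" property read in ucc_geth_probe() below; for
 * example a node with
 *
 *	phy-connection-type = "rgmii-id";
 *
 * resolves to PHY_INTERFACE_MODE_RGMII_ID, and any unrecognised value
 * falls back to PHY_INTERFACE_MODE_MII.
 */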
3703 | ||
a9dbae78 JT |
3704 | static const struct net_device_ops ucc_geth_netdev_ops = { |
3705 | .ndo_open = ucc_geth_open, | |
3706 | .ndo_stop = ucc_geth_close, | |
3707 | .ndo_start_xmit = ucc_geth_start_xmit, | |
3708 | .ndo_validate_addr = eth_validate_addr, | |
3d6593e9 | 3709 | .ndo_set_mac_address = ucc_geth_set_mac_addr, |
a9dbae78 JT |
3710 | .ndo_change_mtu = eth_change_mtu, |
3711 | .ndo_set_multicast_list = ucc_geth_set_multi, | |
3712 | .ndo_tx_timeout = ucc_geth_timeout, | |
3713 | #ifdef CONFIG_NET_POLL_CONTROLLER | |
3714 | .ndo_poll_controller = ucc_netpoll, | |
3715 | #endif | |
3716 | }; | |
3717 | ||
18a8e864 | 3718 | static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match) |
ce973b14 | 3719 | { |
18a8e864 LY |
3720 | struct device *device = &ofdev->dev; |
3721 | struct device_node *np = ofdev->node; | |
ce973b14 LY |
3722 | struct net_device *dev = NULL; |
3723 | struct ucc_geth_private *ugeth = NULL; | |
3724 | struct ucc_geth_info *ug_info; | |
18a8e864 | 3725 | struct resource res; |
728de4c9 | 3726 | int err, ucc_num, max_speed = 0; |
18a8e864 | 3727 | const unsigned int *prop; |
9fb1e350 | 3728 | const char *sprop; |
9b4c7a4e | 3729 | const void *mac_addr; |
728de4c9 KP |
3730 | phy_interface_t phy_interface; |
3731 | static const int enet_to_speed[] = { | |
3732 | SPEED_10, SPEED_10, SPEED_10, | |
3733 | SPEED_100, SPEED_100, SPEED_100, | |
3734 | SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, | |
3735 | }; | |
3736 | static const phy_interface_t enet_to_phy_interface[] = { | |
3737 | PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, | |
3738 | PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, | |
3739 | PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, | |
3740 | PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, | |
3741 | PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, | |
047584ce | 3742 | PHY_INTERFACE_MODE_SGMII, |
728de4c9 | 3743 | }; |
ce973b14 | 3744 | |
b39d66a8 | 3745 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3746 | |
56626f33 AV |
3747 | prop = of_get_property(np, "cell-index", NULL); |
3748 | if (!prop) { | |
3749 | prop = of_get_property(np, "device-id", NULL); | |
3750 | if (!prop) | |
3751 | return -ENODEV; | |
3752 | } | |
3753 | ||
18a8e864 LY |
3754 | ucc_num = *prop - 1; |
3755 | if ((ucc_num < 0) || (ucc_num > 7)) | |
3756 | return -ENODEV; | |
3757 | ||
3758 | ug_info = &ugeth_info[ucc_num]; | |
890de95e LY |
3759 | if (ug_info == NULL) { |
3760 | if (netif_msg_probe(&debug)) | |
3761 | ugeth_err("%s: [%d] Missing additional data!", | |
b39d66a8 | 3762 | __func__, ucc_num); |
890de95e LY |
3763 | return -ENODEV; |
3764 | } | |
3765 | ||
18a8e864 | 3766 | ug_info->uf_info.ucc_num = ucc_num; |
728de4c9 | 3767 | |
9fb1e350 TT |
3768 | sprop = of_get_property(np, "rx-clock-name", NULL); |
3769 | if (sprop) { | |
3770 | ug_info->uf_info.rx_clock = qe_clock_source(sprop); | |
3771 | if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) || | |
3772 | (ug_info->uf_info.rx_clock > QE_CLK24)) { | |
3773 | printk(KERN_ERR | |
3774 | "ucc_geth: invalid rx-clock-name property\n"); | |
3775 | return -EINVAL; | |
3776 | } | |
3777 | } else { | |
3778 | prop = of_get_property(np, "rx-clock", NULL); | |
3779 | if (!prop) { | |
3780 | /* If both rx-clock-name and rx-clock are missing, | |
3781 | we want to tell people to use rx-clock-name. */ | |
3782 | printk(KERN_ERR | |
3783 | "ucc_geth: missing rx-clock-name property\n"); | |
3784 | return -EINVAL; | |
3785 | } | |
3786 | if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { | |
3787 | printk(KERN_ERR | |
3788 | "ucc_geth: invalid rx-clock property\n"); | |
3789 | return -EINVAL; | |
3790 | } | |
3791 | ug_info->uf_info.rx_clock = *prop; | |
3792 | } | |
3793 | ||
3794 | sprop = of_get_property(np, "tx-clock-name", NULL); | |
3795 | if (sprop) { | |
3796 | ug_info->uf_info.tx_clock = qe_clock_source(sprop); | |
3797 | if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) || | |
3798 | (ug_info->uf_info.tx_clock > QE_CLK24)) { | |
3799 | printk(KERN_ERR | |
3800 | "ucc_geth: invalid tx-clock-name property\n"); | |
3801 | return -EINVAL; | |
3802 | } | |
3803 | } else { | |
e410553f | 3804 | prop = of_get_property(np, "tx-clock", NULL); |
9fb1e350 TT |
3805 | if (!prop) { |
3806 | printk(KERN_ERR | |
af901ca1 | 3807 | "ucc_geth: missing tx-clock-name property\n"); |
9fb1e350 TT |
3808 | return -EINVAL; |
3809 | } | |
3810 | if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { | |
3811 | printk(KERN_ERR | |
3812 | "ucc_geth: invalid tx-clock property\n"); | |
3813 | return -EINVAL; | |
3814 | } | |
3815 | ug_info->uf_info.tx_clock = *prop; | |
3816 | } | |
3817 | ||
18a8e864 LY |
3818 | err = of_address_to_resource(np, 0, &res); |
3819 | if (err) | |
3820 | return -EINVAL; | |
3821 | ||
3822 | ug_info->uf_info.regs = res.start; | |
3823 | ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); | |
3104a6ff AV |
3824 | |
3825 | ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0); | |
728de4c9 | 3826 | |
fb1001f3 HW |
3827 | /* Find the TBI PHY node. If it's not there, we don't support SGMII */ |
3828 | ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0); | |
3829 | ||
728de4c9 | 3830 | /* get the phy interface type, or default to MII */ |
4e19b5c1 | 3831 | prop = of_get_property(np, "phy-connection-type", NULL); |
728de4c9 KP |
3832 | if (!prop) { |
3833 | /* handle interface property present in old trees */ | |
3104a6ff | 3834 | prop = of_get_property(ug_info->phy_node, "interface", NULL); |
4e19b5c1 | 3835 | if (prop != NULL) { |
728de4c9 | 3836 | phy_interface = enet_to_phy_interface[*prop]; |
4e19b5c1 KP |
3837 | max_speed = enet_to_speed[*prop]; |
3838 | } else | |
728de4c9 KP |
3839 | phy_interface = PHY_INTERFACE_MODE_MII; |
3840 | } else { | |
3841 | phy_interface = to_phy_interface((const char *)prop); | |
3842 | } | |
3843 | ||
4e19b5c1 KP |
3844 | /* get speed, or derive from PHY interface */ |
3845 | if (max_speed == 0) | |
728de4c9 KP |
3846 | switch (phy_interface) { |
3847 | case PHY_INTERFACE_MODE_GMII: | |
3848 | case PHY_INTERFACE_MODE_RGMII: | |
3849 | case PHY_INTERFACE_MODE_RGMII_ID: | |
bd0ceaab KP |
3850 | case PHY_INTERFACE_MODE_RGMII_RXID: |
3851 | case PHY_INTERFACE_MODE_RGMII_TXID: | |
728de4c9 KP |
3852 | case PHY_INTERFACE_MODE_TBI: |
3853 | case PHY_INTERFACE_MODE_RTBI: | |
047584ce | 3854 | case PHY_INTERFACE_MODE_SGMII: |
728de4c9 KP |
3855 | max_speed = SPEED_1000; |
3856 | break; | |
3857 | default: | |
3858 | max_speed = SPEED_100; | |
3859 | break; | |
3860 | } | |
728de4c9 KP |
3861 | |
3862 | if (max_speed == SPEED_1000) { | |
4e19b5c1 | 3863 | /* configure muram FIFOs for gigabit operation */ |
728de4c9 KP |
3864 | ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; |
3865 | ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; | |
3866 | ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; | |
3867 | ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT; | |
3868 | ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT; | |
3869 | ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; | |
ffea31ed | 3870 | ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; |
674e4f93 HW |
3871 | |
3872 | /* If QE's snum number is 46 which means we need to support | |
3873 | * 4 UECs at 1000Base-T simultaneously, we need to allocate | |
3874 | * more Threads to Rx. | |
3875 | */ | |
3876 | if (qe_get_num_of_snums() == 46) | |
3877 | ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_6; | |
3878 | else | |
3879 | ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; | |
728de4c9 KP |
3880 | } |
3881 | ||
890de95e LY |
3882 | if (netif_msg_probe(&debug)) |
3883 | printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", | |
3884 | ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, | |
3885 | ug_info->uf_info.irq); | |
ce973b14 | 3886 | |
ce973b14 LY |
3887 | /* Create an ethernet device instance */ |
3888 | dev = alloc_etherdev(sizeof(*ugeth)); | |
3889 | ||
3890 | if (dev == NULL) | |
3891 | return -ENOMEM; | |
3892 | ||
3893 | ugeth = netdev_priv(dev); | |
3894 | spin_lock_init(&ugeth->lock); | |
3895 | ||
80a9fad8 AV |
3896 | /* Create CQs for hash tables */ |
3897 | INIT_LIST_HEAD(&ugeth->group_hash_q); | |
3898 | INIT_LIST_HEAD(&ugeth->ind_hash_q); | |
3899 | ||
ce973b14 LY |
3900 | dev_set_drvdata(device, dev); |
3901 | ||
3902 | /* Set the dev->base_addr to the gfar reg region */ | |
3903 | dev->base_addr = (unsigned long)(ug_info->uf_info.regs); | |
3904 | ||
ce973b14 LY |
3905 | SET_NETDEV_DEV(dev, device); |
3906 | ||
3907 | /* Fill in the dev structure */ | |
ac421852 | 3908 | uec_set_ethtool_ops(dev); |
a9dbae78 | 3909 | dev->netdev_ops = &ucc_geth_netdev_ops; |
ce973b14 | 3910 | dev->watchdog_timeo = TX_TIMEOUT; |
1762a29a | 3911 | INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); |
0cededf3 | 3912 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64); |
ce973b14 | 3913 | dev->mtu = 1500; |
ce973b14 | 3914 | |
890de95e | 3915 | ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); |
728de4c9 KP |
3916 | ugeth->phy_interface = phy_interface; |
3917 | ugeth->max_speed = max_speed; | |
3918 | ||
ce973b14 LY |
3919 | err = register_netdev(dev); |
3920 | if (err) { | |
890de95e LY |
3921 | if (netif_msg_probe(ugeth)) |
3922 | ugeth_err("%s: Cannot register net device, aborting.", | |
3923 | dev->name); | |
ce973b14 LY |
3924 | free_netdev(dev); |
3925 | return err; | |
3926 | } | |
3927 | ||
e9eb70c9 | 3928 | mac_addr = of_get_mac_address(np); |
9b4c7a4e LY |
3929 | if (mac_addr) |
3930 | memcpy(dev->dev_addr, mac_addr, 6); | |
ce973b14 | 3931 | |
728de4c9 | 3932 | ugeth->ug_info = ug_info; |
da1aa63e AV |
3933 | ugeth->dev = device; |
3934 | ugeth->ndev = dev; | |
b1c4a9dd | 3935 | ugeth->node = np; |
728de4c9 | 3936 | |
ce973b14 LY |
3937 | return 0; |
3938 | } | |
3939 | ||
18a8e864 | 3940 | static int ucc_geth_remove(struct of_device* ofdev) |
ce973b14 | 3941 | { |
18a8e864 | 3942 | struct device *device = &ofdev->dev; |
ce973b14 LY |
3943 | struct net_device *dev = dev_get_drvdata(device); |
3944 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3945 | ||
80a9fad8 | 3946 | unregister_netdev(dev); |
ce973b14 | 3947 | free_netdev(dev); |
80a9fad8 AV |
3948 | ucc_geth_memclean(ugeth); |
3949 | dev_set_drvdata(device, NULL); | |
ce973b14 LY |
3950 | |
3951 | return 0; | |
3952 | } | |
3953 | ||
18a8e864 LY |
3954 | static struct of_device_id ucc_geth_match[] = { |
3955 | { | |
3956 | .type = "network", | |
3957 | .compatible = "ucc_geth", | |
3958 | }, | |
3959 | {}, | |
3960 | }; | |
3961 | ||
3962 | MODULE_DEVICE_TABLE(of, ucc_geth_match); | |
3963 | ||
3964 | static struct of_platform_driver ucc_geth_driver = { | |
3965 | .name = DRV_NAME, | |
3966 | .match_table = ucc_geth_match, | |
3967 | .probe = ucc_geth_probe, | |
3968 | .remove = ucc_geth_remove, | |
2394905f AV |
3969 | .suspend = ucc_geth_suspend, |
3970 | .resume = ucc_geth_resume, | |
ce973b14 LY |
3971 | }; |
3972 | ||
3973 | static int __init ucc_geth_init(void) | |
3974 | { | |
728de4c9 KP |
3975 | int i, ret; |
3976 | ||
890de95e LY |
3977 | if (netif_msg_drv(&debug)) |
3978 | printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); | |
ce973b14 LY |
3979 | for (i = 0; i < 8; i++) |
3980 | memcpy(&(ugeth_info[i]), &ugeth_primary_info, | |
3981 | sizeof(ugeth_primary_info)); | |
3982 | ||
728de4c9 KP |
3983 | ret = of_register_platform_driver(&ucc_geth_driver); |
3984 | ||
728de4c9 | 3985 | return ret; |
ce973b14 LY |
3986 | } |
3987 | ||
3988 | static void __exit ucc_geth_exit(void) | |
3989 | { | |
a4f0c2ca | 3990 | of_unregister_platform_driver(&ucc_geth_driver); |
ce973b14 LY |
3991 | } |
3992 | ||
3993 | module_init(ucc_geth_init); | |
3994 | module_exit(ucc_geth_exit); | |
3995 | ||
3996 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | |
3997 | MODULE_DESCRIPTION(DRV_DESC); | |
c2bcf00b | 3998 | MODULE_VERSION(DRV_VERSION); |
ce973b14 | 3999 | MODULE_LICENSE("GPL"); |