Commit | Line | Data |
---|---|---|
ce973b14 | 1 | /* |
4e19b5c1 | 2 | * Copyright (C) 2006-2007 Freescale Semiconductor, Inc. All rights reserved. |
ce973b14 LY |
3 | * |
4 | * Author: Shlomi Gridish <[email protected]> | |
18a8e864 | 5 | * Li Yang <[email protected]> |
ce973b14 LY |
6 | * |
7 | * Description: | |
8 | * QE UCC Gigabit Ethernet Driver | |
9 | * | |
ce973b14 LY |
10 | * This program is free software; you can redistribute it and/or modify it |
11 | * under the terms of the GNU General Public License as published by the | |
12 | * Free Software Foundation; either version 2 of the License, or (at your | |
13 | * option) any later version. | |
14 | */ | |
15 | #include <linux/kernel.h> | |
16 | #include <linux/init.h> | |
17 | #include <linux/errno.h> | |
18 | #include <linux/slab.h> | |
19 | #include <linux/stddef.h> | |
20 | #include <linux/interrupt.h> | |
21 | #include <linux/netdevice.h> | |
22 | #include <linux/etherdevice.h> | |
23 | #include <linux/skbuff.h> | |
24 | #include <linux/spinlock.h> | |
25 | #include <linux/mm.h> | |
ce973b14 LY |
26 | #include <linux/dma-mapping.h> |
27 | #include <linux/fsl_devices.h> | |
ce973b14 | 28 | #include <linux/mii.h> |
728de4c9 | 29 | #include <linux/phy.h> |
df19b6b0 | 30 | #include <linux/workqueue.h> |
55b6c8e9 | 31 | #include <linux/of_platform.h> |
ce973b14 LY |
32 | |
33 | #include <asm/uaccess.h> | |
34 | #include <asm/irq.h> | |
35 | #include <asm/io.h> | |
36 | #include <asm/immap_qe.h> | |
37 | #include <asm/qe.h> | |
38 | #include <asm/ucc.h> | |
39 | #include <asm/ucc_fast.h> | |
40 | ||
41 | #include "ucc_geth.h" | |
728de4c9 | 42 | #include "ucc_geth_mii.h" |
ce973b14 LY |
43 | |
44 | #undef DEBUG | |
45 | ||
ce973b14 LY |
46 | #define ugeth_printk(level, format, arg...) \ |
47 | printk(level format "\n", ## arg) | |
48 | ||
49 | #define ugeth_dbg(format, arg...) \ | |
50 | ugeth_printk(KERN_DEBUG , format , ## arg) | |
51 | #define ugeth_err(format, arg...) \ | |
52 | ugeth_printk(KERN_ERR , format , ## arg) | |
53 | #define ugeth_info(format, arg...) \ | |
54 | ugeth_printk(KERN_INFO , format , ## arg) | |
55 | #define ugeth_warn(format, arg...) \ | |
56 | ugeth_printk(KERN_WARNING , format , ## arg) | |
57 | ||
58 | #ifdef UGETH_VERBOSE_DEBUG | |
59 | #define ugeth_vdbg ugeth_dbg | |
60 | #else | |
61 | #define ugeth_vdbg(fmt, args...) do { } while (0) | |
62 | #endif /* UGETH_VERBOSE_DEBUG */ | |
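/* Illustrative expansion of the wrappers above: a call such as
 * ugeth_err("Can not get SNUM.") becomes
 * printk(KERN_ERR "Can not get SNUM." "\n"),
 * i.e. the macro supplies the log level and the trailing newline, so
 * callers pass a bare format string without either.
 */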
890de95e | 63 | #define UGETH_MSG_DEFAULT ((NETIF_MSG_IFUP << 1) - 1) |
ce973b14 | 64 | |
88a15f2e | 65 | |
ce973b14 LY |
66 | static DEFINE_SPINLOCK(ugeth_lock); |
67 | ||
890de95e LY |
68 | static struct { |
69 | u32 msg_enable; | |
70 | } debug = { -1 }; | |
71 | ||
72 | module_param_named(debug, debug.msg_enable, int, 0); | |
73 | MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 0xffff=all)"); | |
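/* Example usage (illustrative; assumes the module is built as ucc_geth.ko):
 * message verbosity can be chosen at load time, e.g.
 * "modprobe ucc_geth debug=0x3fff", where the value is a NETIF_MSG_* bit
 * mask as described above.
 */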
74 | ||
18a8e864 | 75 | static struct ucc_geth_info ugeth_primary_info = { |
ce973b14 LY |
76 | .uf_info = { |
77 | .bd_mem_part = MEM_PART_SYSTEM, | |
78 | .rtsm = UCC_FAST_SEND_IDLES_BETWEEN_FRAMES, | |
79 | .max_rx_buf_length = 1536, | |
728de4c9 | 80 | /* adjusted at startup if max-speed 1000 */ |
ce973b14 LY |
81 | .urfs = UCC_GETH_URFS_INIT, |
82 | .urfet = UCC_GETH_URFET_INIT, | |
83 | .urfset = UCC_GETH_URFSET_INIT, | |
84 | .utfs = UCC_GETH_UTFS_INIT, | |
85 | .utfet = UCC_GETH_UTFET_INIT, | |
86 | .utftt = UCC_GETH_UTFTT_INIT, | |
ce973b14 LY |
87 | .ufpt = 256, |
88 | .mode = UCC_FAST_PROTOCOL_MODE_ETHERNET, | |
89 | .ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL, | |
90 | .tenc = UCC_FAST_TX_ENCODING_NRZ, | |
91 | .renc = UCC_FAST_RX_ENCODING_NRZ, | |
92 | .tcrc = UCC_FAST_16_BIT_CRC, | |
93 | .synl = UCC_FAST_SYNC_LEN_NOT_USED, | |
94 | }, | |
95 | .numQueuesTx = 1, | |
96 | .numQueuesRx = 1, | |
97 | .extendedFilteringChainPointer = ((uint32_t) NULL), | |
98 | .typeorlen = 3072 /*1536 */ , | |
99 | .nonBackToBackIfgPart1 = 0x40, | |
100 | .nonBackToBackIfgPart2 = 0x60, | |
101 | .miminumInterFrameGapEnforcement = 0x50, | |
102 | .backToBackInterFrameGap = 0x60, | |
103 | .mblinterval = 128, | |
104 | .nortsrbytetime = 5, | |
105 | .fracsiz = 1, | |
106 | .strictpriorityq = 0xff, | |
107 | .altBebTruncation = 0xa, | |
108 | .excessDefer = 1, | |
109 | .maxRetransmission = 0xf, | |
110 | .collisionWindow = 0x37, | |
111 | .receiveFlowControl = 1, | |
ac421852 | 112 | .transmitFlowControl = 1, |
ce973b14 LY |
113 | .maxGroupAddrInHash = 4, |
114 | .maxIndAddrInHash = 4, | |
115 | .prel = 7, | |
116 | .maxFrameLength = 1518, | |
117 | .minFrameLength = 64, | |
118 | .maxD1Length = 1520, | |
119 | .maxD2Length = 1520, | |
120 | .vlantype = 0x8100, | |
121 | .ecamptr = ((uint32_t) NULL), | |
122 | .eventRegMask = UCCE_OTHER, | |
123 | .pausePeriod = 0xf000, | |
124 | .interruptcoalescingmaxvalue = {1, 1, 1, 1, 1, 1, 1, 1}, | |
125 | .bdRingLenTx = { | |
126 | TX_BD_RING_LEN, | |
127 | TX_BD_RING_LEN, | |
128 | TX_BD_RING_LEN, | |
129 | TX_BD_RING_LEN, | |
130 | TX_BD_RING_LEN, | |
131 | TX_BD_RING_LEN, | |
132 | TX_BD_RING_LEN, | |
133 | TX_BD_RING_LEN}, | |
134 | ||
135 | .bdRingLenRx = { | |
136 | RX_BD_RING_LEN, | |
137 | RX_BD_RING_LEN, | |
138 | RX_BD_RING_LEN, | |
139 | RX_BD_RING_LEN, | |
140 | RX_BD_RING_LEN, | |
141 | RX_BD_RING_LEN, | |
142 | RX_BD_RING_LEN, | |
143 | RX_BD_RING_LEN}, | |
144 | ||
145 | .numStationAddresses = UCC_GETH_NUM_OF_STATION_ADDRESSES_1, | |
146 | .largestexternallookupkeysize = | |
147 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE, | |
ac421852 LY |
148 | .statisticsMode = UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE | |
149 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX | | |
150 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX, | |
ce973b14 LY |
151 | .vlanOperationTagged = UCC_GETH_VLAN_OPERATION_TAGGED_NOP, |
152 | .vlanOperationNonTagged = UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP, | |
153 | .rxQoSMode = UCC_GETH_QOS_MODE_DEFAULT, | |
154 | .aufc = UPSMR_AUTOMATIC_FLOW_CONTROL_MODE_NONE, | |
155 | .padAndCrc = MACCFG2_PAD_AND_CRC_MODE_PAD_AND_CRC, | |
ffea31ed JT |
156 | .numThreadsTx = UCC_GETH_NUM_OF_THREADS_1, |
157 | .numThreadsRx = UCC_GETH_NUM_OF_THREADS_1, | |
ce973b14 LY |
158 | .riscTx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, |
159 | .riscRx = QE_RISC_ALLOCATION_RISC1_AND_RISC2, | |
160 | }; | |
161 | ||
18a8e864 | 162 | static struct ucc_geth_info ugeth_info[8]; |
ce973b14 LY |
163 | |
164 | #ifdef DEBUG | |
165 | static void mem_disp(u8 *addr, int size) | |
166 | { | |
167 | u8 *i; | |
168 | int size16Aling = (size >> 4) << 4; | |
169 | int size4Aling = (size >> 2) << 2; | |
170 | int notAlign = 0; | |
171 | if (size % 16) | |
172 | notAlign = 1; | |
173 | ||
174 | for (i = addr; (u32) i < (u32) addr + size16Aling; i += 16) | |
175 | printk("0x%08x: %08x %08x %08x %08x\r\n", | |
176 | (u32) i, | |
177 | *((u32 *) (i)), | |
178 | *((u32 *) (i + 4)), | |
179 | *((u32 *) (i + 8)), *((u32 *) (i + 12))); | |
180 | if (notAlign == 1) | |
181 | printk("0x%08x: ", (u32) i); | |
182 | for (; (u32) i < (u32) addr + size4Aling; i += 4) | |
183 | printk("%08x ", *((u32 *) (i))); | |
184 | for (; (u32) i < (u32) addr + size; i++) | |
185 | printk("%02x", *((u8 *) (i))); | |
186 | if (notAlign == 1) | |
187 | printk("\r\n"); | |
188 | } | |
189 | #endif /* DEBUG */ | |
190 | ||
ce973b14 LY |
191 | static struct list_head *dequeue(struct list_head *lh) |
192 | { | |
193 | unsigned long flags; | |
194 | ||
1083cfe1 | 195 | spin_lock_irqsave(&ugeth_lock, flags); |
ce973b14 LY |
196 | if (!list_empty(lh)) { |
197 | struct list_head *node = lh->next; | |
198 | list_del(node); | |
1083cfe1 | 199 | spin_unlock_irqrestore(&ugeth_lock, flags); |
ce973b14 LY |
200 | return node; |
201 | } else { | |
1083cfe1 | 202 | spin_unlock_irqrestore(&ugeth_lock, flags); |
ce973b14 LY |
203 | return NULL; |
204 | } | |
205 | } | |
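/* dequeue() above is a lock-protected pop: it removes and returns the
 * first node of the list, or NULL if the list is empty, holding
 * ugeth_lock with interrupts disabled for the duration of the check.
 */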
206 | ||
6fee40e9 AF |
207 | static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth, |
208 | u8 __iomem *bd) | |
ce973b14 LY |
209 | { |
210 | struct sk_buff *skb = NULL; | |
211 | ||
212 | skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + | |
213 | UCC_GETH_RX_DATA_BUF_ALIGNMENT); | |
214 | ||
215 | if (skb == NULL) | |
216 | return NULL; | |
217 | ||
218 | /* We need the data buffer to be aligned properly. We will reserve | |
219 | * as many bytes as needed to align the data properly | |
220 | */ | |
221 | skb_reserve(skb, | |
222 | UCC_GETH_RX_DATA_BUF_ALIGNMENT - | |
223 | (((unsigned)skb->data) & (UCC_GETH_RX_DATA_BUF_ALIGNMENT - | |
224 | 1))); | |
225 | ||
226 | skb->dev = ugeth->dev; | |
227 | ||
6fee40e9 | 228 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
7f80202b | 229 | dma_map_single(&ugeth->dev->dev, |
ce973b14 LY |
230 | skb->data, |
231 | ugeth->ug_info->uf_info.max_rx_buf_length + | |
232 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | |
233 | DMA_FROM_DEVICE)); | |
234 | ||
6fee40e9 AF |
235 | out_be32((u32 __iomem *)bd, |
236 | (R_E | R_I | (in_be32((u32 __iomem*)bd) & R_W))); | |
ce973b14 LY |
237 | |
238 | return skb; | |
239 | } | |
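/* Worked example of the skb_reserve() alignment above (illustrative;
 * UCC_GETH_RX_DATA_BUF_ALIGNMENT is assumed to be a power of two): with
 * an alignment of 64 and skb->data ending in 0x28, the driver reserves
 * 64 - 0x28 = 0x18 bytes so the receive buffer starts on the next
 * 64-byte boundary; a buffer that is already aligned is shifted by a
 * full alignment unit.
 */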
240 | ||
18a8e864 | 241 | static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ) |
ce973b14 | 242 | { |
6fee40e9 | 243 | u8 __iomem *bd; |
ce973b14 LY |
244 | u32 bd_status; |
245 | struct sk_buff *skb; | |
246 | int i; | |
247 | ||
248 | bd = ugeth->p_rx_bd_ring[rxQ]; | |
249 | i = 0; | |
250 | ||
251 | do { | |
6fee40e9 | 252 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
253 | skb = get_new_skb(ugeth, bd); |
254 | ||
255 | if (!skb) /* If we cannot allocate a data buffer, | |
256 | abort; cleanup happens elsewhere */ | |
257 | return -ENOMEM; | |
258 | ||
259 | ugeth->rx_skbuff[rxQ][i] = skb; | |
260 | ||
261 | /* advance the BD pointer */ | |
18a8e864 | 262 | bd += sizeof(struct qe_bd); |
ce973b14 LY |
263 | i++; |
264 | } while (!(bd_status & R_W)); | |
265 | ||
266 | return 0; | |
267 | } | |
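/* rx_bd_buffer_set() above walks the entire Rx BD ring of queue rxQ,
 * attaching one freshly allocated skb to every descriptor; the loop
 * ends after the descriptor whose status word carries R_W, assumed
 * here to be the ring-wrap bit that marks the last BD in the ring.
 */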
268 | ||
18a8e864 | 269 | static int fill_init_enet_entries(struct ucc_geth_private *ugeth, |
6fee40e9 | 270 | u32 *p_start, |
ce973b14 LY |
271 | u8 num_entries, |
272 | u32 thread_size, | |
273 | u32 thread_alignment, | |
18a8e864 | 274 | enum qe_risc_allocation risc, |
ce973b14 LY |
275 | int skip_page_for_first_entry) |
276 | { | |
277 | u32 init_enet_offset; | |
278 | u8 i; | |
279 | int snum; | |
280 | ||
281 | for (i = 0; i < num_entries; i++) { | |
282 | if ((snum = qe_get_snum()) < 0) { | |
890de95e LY |
283 | if (netif_msg_ifup(ugeth)) |
284 | ugeth_err("fill_init_enet_entries: Can not get SNUM."); | |
ce973b14 LY |
285 | return snum; |
286 | } | |
287 | if ((i == 0) && skip_page_for_first_entry) | |
288 | /* First entry of Rx does not have page */ | |
289 | init_enet_offset = 0; | |
290 | else { | |
291 | init_enet_offset = | |
292 | qe_muram_alloc(thread_size, thread_alignment); | |
4c35630c | 293 | if (IS_ERR_VALUE(init_enet_offset)) { |
890de95e LY |
294 | if (netif_msg_ifup(ugeth)) |
295 | ugeth_err("fill_init_enet_entries: Can not allocate DPRAM memory."); | |
ce973b14 LY |
296 | qe_put_snum((u8) snum); |
297 | return -ENOMEM; | |
298 | } | |
299 | } | |
300 | *(p_start++) = | |
301 | ((u8) snum << ENET_INIT_PARAM_SNUM_SHIFT) | init_enet_offset | |
302 | | risc; | |
303 | } | |
304 | ||
305 | return 0; | |
306 | } | |
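/* Layout of each 32-bit init-enet entry written above (illustrative;
 * exact field positions follow the ENET_INIT_PARAM_* definitions in
 * ucc_geth.h): the SNUM occupies the bits selected by
 * ENET_INIT_PARAM_SNUM_SHIFT, the low bits carry the MURAM offset of
 * the thread parameter RAM, and the risc bits select the RISC
 * engine(s) that serve the thread. The first Rx entry gets offset 0
 * when skip_page_for_first_entry is set.
 */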
307 | ||
18a8e864 | 308 | static int return_init_enet_entries(struct ucc_geth_private *ugeth, |
6fee40e9 | 309 | u32 *p_start, |
ce973b14 | 310 | u8 num_entries, |
18a8e864 | 311 | enum qe_risc_allocation risc, |
ce973b14 LY |
312 | int skip_page_for_first_entry) |
313 | { | |
314 | u32 init_enet_offset; | |
315 | u8 i; | |
316 | int snum; | |
317 | ||
318 | for (i = 0; i < num_entries; i++) { | |
6fee40e9 AF |
319 | u32 val = *p_start; |
320 | ||
ce973b14 LY |
321 | /* Check that this entry was actually valid -- |
322 | needed in case an earlier allocation failed */ | |
6fee40e9 | 323 | if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { |
ce973b14 | 324 | snum = |
6fee40e9 | 325 | (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> |
ce973b14 LY |
326 | ENET_INIT_PARAM_SNUM_SHIFT; |
327 | qe_put_snum((u8) snum); | |
328 | if (!((i == 0) && skip_page_for_first_entry)) { | |
329 | /* First entry of Rx does not have page */ | |
330 | init_enet_offset = | |
6fee40e9 | 331 | (val & ENET_INIT_PARAM_PTR_MASK); |
ce973b14 LY |
332 | qe_muram_free(init_enet_offset); |
333 | } | |
6fee40e9 | 334 | *p_start++ = 0; |
ce973b14 LY |
335 | } |
336 | } | |
337 | ||
338 | return 0; | |
339 | } | |
340 | ||
341 | #ifdef DEBUG | |
18a8e864 | 342 | static int dump_init_enet_entries(struct ucc_geth_private *ugeth, |
6fee40e9 | 343 | u32 __iomem *p_start, |
ce973b14 LY |
344 | u8 num_entries, |
345 | u32 thread_size, | |
18a8e864 | 346 | enum qe_risc_allocation risc, |
ce973b14 LY |
347 | int skip_page_for_first_entry) |
348 | { | |
349 | u32 init_enet_offset; | |
350 | u8 i; | |
351 | int snum; | |
352 | ||
353 | for (i = 0; i < num_entries; i++) { | |
6fee40e9 AF |
354 | u32 val = in_be32(p_start); |
355 | ||
ce973b14 LY |
356 | /* Check that this entry was actually valid -- |
357 | needed in case an earlier allocation failed */ | |
6fee40e9 | 358 | if ((val & ENET_INIT_PARAM_RISC_MASK) == risc) { |
ce973b14 | 359 | snum = |
6fee40e9 | 360 | (u32) (val & ENET_INIT_PARAM_SNUM_MASK) >> |
ce973b14 LY |
361 | ENET_INIT_PARAM_SNUM_SHIFT; |
362 | qe_put_snum((u8) snum); | |
363 | if (!((i == 0) && skip_page_for_first_entry)) { | |
364 | /* First entry of Rx does not have page */ | |
365 | init_enet_offset = | |
366 | (in_be32(p_start) & | |
367 | ENET_INIT_PARAM_PTR_MASK); | |
368 | ugeth_info("Init enet entry %d:", i); | |
369 | ugeth_info("Base address: 0x%08x", | |
370 | (u32) | |
371 | qe_muram_addr(init_enet_offset)); | |
372 | mem_disp(qe_muram_addr(init_enet_offset), | |
373 | thread_size); | |
374 | } | |
375 | p_start++; | |
376 | } | |
377 | } | |
378 | ||
379 | return 0; | |
380 | } | |
381 | #endif | |
382 | ||
18a8e864 | 383 | static void put_enet_addr_container(struct enet_addr_container *enet_addr_cont) |
ce973b14 LY |
384 | { |
385 | kfree(enet_addr_cont); | |
386 | } | |
387 | ||
df19b6b0 | 388 | static void set_mac_addr(__be16 __iomem *reg, u8 *mac) |
18a8e864 LY |
389 | { |
390 | out_be16(®[0], ((u16)mac[5] << 8) | mac[4]); | |
391 | out_be16(®[1], ((u16)mac[3] << 8) | mac[2]); | |
392 | out_be16(®[2], ((u16)mac[1] << 8) | mac[0]); | |
393 | } | |
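/* Worked example (illustrative): for the MAC address 00:04:9f:01:02:03,
 * i.e. mac[0] = 0x00 ... mac[5] = 0x03, the writes above produce
 * reg[0] = 0x0302, reg[1] = 0x019f and reg[2] = 0x0400, i.e. the
 * address bytes are stored in reversed order.
 */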
394 | ||
18a8e864 | 395 | static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num) |
ce973b14 | 396 | { |
6fee40e9 | 397 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
ce973b14 LY |
398 | |
399 | if (!(paddr_num < NUM_OF_PADDRS)) { | |
b39d66a8 | 400 | ugeth_warn("%s: Illegal paddr_num.", __func__); |
ce973b14 LY |
401 | return -EINVAL; |
402 | } | |
403 | ||
404 | p_82xx_addr_filt = | |
6fee40e9 | 405 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> |
ce973b14 LY |
406 | addressfiltering; |
407 | ||
408 | /* Writing address ff.ff.ff.ff.ff.ff disables address | |
409 | recognition for this register */ | |
410 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].h, 0xffff); | |
411 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].m, 0xffff); | |
412 | out_be16(&p_82xx_addr_filt->paddr[paddr_num].l, 0xffff); | |
413 | ||
414 | return 0; | |
415 | } | |
416 | ||
18a8e864 LY |
417 | static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth, |
418 | u8 *p_enet_addr) | |
ce973b14 | 419 | { |
6fee40e9 | 420 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
ce973b14 LY |
421 | u32 cecr_subblock; |
422 | ||
423 | p_82xx_addr_filt = | |
6fee40e9 | 424 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram-> |
ce973b14 LY |
425 | addressfiltering; |
426 | ||
427 | cecr_subblock = | |
428 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
429 | ||
430 | /* Ethernet frames are defined in Little Endian mode, | |
431 | therefore to insert */ | |
432 | /* the address into the hash (Big Endian mode), we reverse the bytes. */ | |
18a8e864 LY |
433 | |
434 | set_mac_addr(&p_82xx_addr_filt->taddr.h, p_enet_addr); | |
ce973b14 LY |
435 | |
436 | qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock, | |
18a8e864 | 437 | QE_CR_PROTOCOL_ETHERNET, 0); |
ce973b14 LY |
438 | } |
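/* hw_add_addr_in_hash() above loads the byte-reversed address into the
 * 82xx address-filtering parameter RAM and then issues the
 * QE_SET_GROUP_ADDRESS command, which asks the QE microcode to fold
 * the address into the group hash filter.
 */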
439 | ||
440 | #ifdef CONFIG_UGETH_MAGIC_PACKET | |
18a8e864 | 441 | static void magic_packet_detection_enable(struct ucc_geth_private *ugeth) |
ce973b14 | 442 | { |
18a8e864 | 443 | struct ucc_fast_private *uccf; |
6fee40e9 | 444 | struct ucc_geth __iomem *ug_regs; |
ce973b14 LY |
445 | |
446 | uccf = ugeth->uccf; | |
447 | ug_regs = ugeth->ug_regs; | |
448 | ||
449 | /* Enable interrupts for magic packet detection */ | |
3bc53427 | 450 | setbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD); |
ce973b14 LY |
451 | |
452 | /* Enable magic packet detection */ | |
3bc53427 | 453 | setbits32(&ug_regs->maccfg2, MACCFG2_MPE); |
ce973b14 LY |
454 | } |
455 | ||
18a8e864 | 456 | static void magic_packet_detection_disable(struct ucc_geth_private *ugeth) |
ce973b14 | 457 | { |
18a8e864 | 458 | struct ucc_fast_private *uccf; |
6fee40e9 | 459 | struct ucc_geth __iomem *ug_regs; |
ce973b14 LY |
460 | |
461 | uccf = ugeth->uccf; | |
462 | ug_regs = ugeth->ug_regs; | |
463 | ||
464 | /* Disable interrupts for magic packet detection */ | |
3bc53427 | 465 | clrbits32(uccf->p_uccm, UCC_GETH_UCCE_MPD); |
ce973b14 LY |
466 | |
467 | /* Disable magic packet detection */ | |
3bc53427 | 468 | clrbits32(&ug_regs->maccfg2, MACCFG2_MPE); |
ce973b14 LY |
469 | } |
470 | #endif /* MAGIC_PACKET */ | |
471 | ||
18a8e864 | 472 | static inline int compare_addr(u8 **addr1, u8 **addr2) |
ce973b14 LY |
473 | { |
474 | return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS); | |
475 | } | |
476 | ||
477 | #ifdef DEBUG | |
18a8e864 LY |
478 | static void get_statistics(struct ucc_geth_private *ugeth, |
479 | struct ucc_geth_tx_firmware_statistics * | |
ce973b14 | 480 | tx_firmware_statistics, |
18a8e864 | 481 | struct ucc_geth_rx_firmware_statistics * |
ce973b14 | 482 | rx_firmware_statistics, |
18a8e864 | 483 | struct ucc_geth_hardware_statistics *hardware_statistics) |
ce973b14 | 484 | { |
6fee40e9 AF |
485 | struct ucc_fast __iomem *uf_regs; |
486 | struct ucc_geth __iomem *ug_regs; | |
18a8e864 LY |
487 | struct ucc_geth_tx_firmware_statistics_pram *p_tx_fw_statistics_pram; |
488 | struct ucc_geth_rx_firmware_statistics_pram *p_rx_fw_statistics_pram; | |
ce973b14 LY |
489 | |
490 | ug_regs = ugeth->ug_regs; | |
6fee40e9 | 491 | uf_regs = (struct ucc_fast __iomem *) ug_regs; |
ce973b14 LY |
492 | p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram; |
493 | p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram; | |
494 | ||
495 | /* Tx firmware only if user handed pointer and driver actually | |
496 | gathers Tx firmware statistics */ | |
497 | if (tx_firmware_statistics && p_tx_fw_statistics_pram) { | |
498 | tx_firmware_statistics->sicoltx = | |
499 | in_be32(&p_tx_fw_statistics_pram->sicoltx); | |
500 | tx_firmware_statistics->mulcoltx = | |
501 | in_be32(&p_tx_fw_statistics_pram->mulcoltx); | |
502 | tx_firmware_statistics->latecoltxfr = | |
503 | in_be32(&p_tx_fw_statistics_pram->latecoltxfr); | |
504 | tx_firmware_statistics->frabortduecol = | |
505 | in_be32(&p_tx_fw_statistics_pram->frabortduecol); | |
506 | tx_firmware_statistics->frlostinmactxer = | |
507 | in_be32(&p_tx_fw_statistics_pram->frlostinmactxer); | |
508 | tx_firmware_statistics->carriersenseertx = | |
509 | in_be32(&p_tx_fw_statistics_pram->carriersenseertx); | |
510 | tx_firmware_statistics->frtxok = | |
511 | in_be32(&p_tx_fw_statistics_pram->frtxok); | |
512 | tx_firmware_statistics->txfrexcessivedefer = | |
513 | in_be32(&p_tx_fw_statistics_pram->txfrexcessivedefer); | |
514 | tx_firmware_statistics->txpkts256 = | |
515 | in_be32(&p_tx_fw_statistics_pram->txpkts256); | |
516 | tx_firmware_statistics->txpkts512 = | |
517 | in_be32(&p_tx_fw_statistics_pram->txpkts512); | |
518 | tx_firmware_statistics->txpkts1024 = | |
519 | in_be32(&p_tx_fw_statistics_pram->txpkts1024); | |
520 | tx_firmware_statistics->txpktsjumbo = | |
521 | in_be32(&p_tx_fw_statistics_pram->txpktsjumbo); | |
522 | } | |
523 | ||
524 | /* Rx firmware only if user handed pointer and driver actually | |
525 | * gathers Rx firmware statistics */ | |
526 | if (rx_firmware_statistics && p_rx_fw_statistics_pram) { | |
527 | int i; | |
528 | rx_firmware_statistics->frrxfcser = | |
529 | in_be32(&p_rx_fw_statistics_pram->frrxfcser); | |
530 | rx_firmware_statistics->fraligner = | |
531 | in_be32(&p_rx_fw_statistics_pram->fraligner); | |
532 | rx_firmware_statistics->inrangelenrxer = | |
533 | in_be32(&p_rx_fw_statistics_pram->inrangelenrxer); | |
534 | rx_firmware_statistics->outrangelenrxer = | |
535 | in_be32(&p_rx_fw_statistics_pram->outrangelenrxer); | |
536 | rx_firmware_statistics->frtoolong = | |
537 | in_be32(&p_rx_fw_statistics_pram->frtoolong); | |
538 | rx_firmware_statistics->runt = | |
539 | in_be32(&p_rx_fw_statistics_pram->runt); | |
540 | rx_firmware_statistics->verylongevent = | |
541 | in_be32(&p_rx_fw_statistics_pram->verylongevent); | |
542 | rx_firmware_statistics->symbolerror = | |
543 | in_be32(&p_rx_fw_statistics_pram->symbolerror); | |
544 | rx_firmware_statistics->dropbsy = | |
545 | in_be32(&p_rx_fw_statistics_pram->dropbsy); | |
546 | for (i = 0; i < 0x8; i++) | |
547 | rx_firmware_statistics->res0[i] = | |
548 | p_rx_fw_statistics_pram->res0[i]; | |
549 | rx_firmware_statistics->mismatchdrop = | |
550 | in_be32(&p_rx_fw_statistics_pram->mismatchdrop); | |
551 | rx_firmware_statistics->underpkts = | |
552 | in_be32(&p_rx_fw_statistics_pram->underpkts); | |
553 | rx_firmware_statistics->pkts256 = | |
554 | in_be32(&p_rx_fw_statistics_pram->pkts256); | |
555 | rx_firmware_statistics->pkts512 = | |
556 | in_be32(&p_rx_fw_statistics_pram->pkts512); | |
557 | rx_firmware_statistics->pkts1024 = | |
558 | in_be32(&p_rx_fw_statistics_pram->pkts1024); | |
559 | rx_firmware_statistics->pktsjumbo = | |
560 | in_be32(&p_rx_fw_statistics_pram->pktsjumbo); | |
561 | rx_firmware_statistics->frlossinmacer = | |
562 | in_be32(&p_rx_fw_statistics_pram->frlossinmacer); | |
563 | rx_firmware_statistics->pausefr = | |
564 | in_be32(&p_rx_fw_statistics_pram->pausefr); | |
565 | for (i = 0; i < 0x4; i++) | |
566 | rx_firmware_statistics->res1[i] = | |
567 | p_rx_fw_statistics_pram->res1[i]; | |
568 | rx_firmware_statistics->removevlan = | |
569 | in_be32(&p_rx_fw_statistics_pram->removevlan); | |
570 | rx_firmware_statistics->replacevlan = | |
571 | in_be32(&p_rx_fw_statistics_pram->replacevlan); | |
572 | rx_firmware_statistics->insertvlan = | |
573 | in_be32(&p_rx_fw_statistics_pram->insertvlan); | |
574 | } | |
575 | ||
576 | /* Hardware only if user handed pointer and driver actually | |
577 | gathers hardware statistics */ | |
3bc53427 TT |
578 | if (hardware_statistics && |
579 | (in_be32(&uf_regs->upsmr) & UCC_GETH_UPSMR_HSE)) { | |
ce973b14 LY |
580 | hardware_statistics->tx64 = in_be32(&ug_regs->tx64); |
581 | hardware_statistics->tx127 = in_be32(&ug_regs->tx127); | |
582 | hardware_statistics->tx255 = in_be32(&ug_regs->tx255); | |
583 | hardware_statistics->rx64 = in_be32(&ug_regs->rx64); | |
584 | hardware_statistics->rx127 = in_be32(&ug_regs->rx127); | |
585 | hardware_statistics->rx255 = in_be32(&ug_regs->rx255); | |
586 | hardware_statistics->txok = in_be32(&ug_regs->txok); | |
587 | hardware_statistics->txcf = in_be16(&ug_regs->txcf); | |
588 | hardware_statistics->tmca = in_be32(&ug_regs->tmca); | |
589 | hardware_statistics->tbca = in_be32(&ug_regs->tbca); | |
590 | hardware_statistics->rxfok = in_be32(&ug_regs->rxfok); | |
591 | hardware_statistics->rxbok = in_be32(&ug_regs->rxbok); | |
592 | hardware_statistics->rbyt = in_be32(&ug_regs->rbyt); | |
593 | hardware_statistics->rmca = in_be32(&ug_regs->rmca); | |
594 | hardware_statistics->rbca = in_be32(&ug_regs->rbca); | |
595 | } | |
596 | } | |
597 | ||
18a8e864 | 598 | static void dump_bds(struct ucc_geth_private *ugeth) |
ce973b14 LY |
599 | { |
600 | int i; | |
601 | int length; | |
602 | ||
603 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | |
604 | if (ugeth->p_tx_bd_ring[i]) { | |
605 | length = | |
606 | (ugeth->ug_info->bdRingLenTx[i] * | |
18a8e864 | 607 | sizeof(struct qe_bd)); |
ce973b14 LY |
608 | ugeth_info("TX BDs[%d]", i); |
609 | mem_disp(ugeth->p_tx_bd_ring[i], length); | |
610 | } | |
611 | } | |
612 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | |
613 | if (ugeth->p_rx_bd_ring[i]) { | |
614 | length = | |
615 | (ugeth->ug_info->bdRingLenRx[i] * | |
18a8e864 | 616 | sizeof(struct qe_bd)); |
ce973b14 LY |
617 | ugeth_info("RX BDs[%d]", i); |
618 | mem_disp(ugeth->p_rx_bd_ring[i], length); | |
619 | } | |
620 | } | |
621 | } | |
622 | ||
18a8e864 | 623 | static void dump_regs(struct ucc_geth_private *ugeth) |
ce973b14 LY |
624 | { |
625 | int i; | |
626 | ||
627 | ugeth_info("UCC%d Geth registers:", ugeth->ug_info->uf_info.ucc_num); | |
628 | ugeth_info("Base address: 0x%08x", (u32) ugeth->ug_regs); | |
629 | ||
630 | ugeth_info("maccfg1 : addr - 0x%08x, val - 0x%08x", | |
631 | (u32) & ugeth->ug_regs->maccfg1, | |
632 | in_be32(&ugeth->ug_regs->maccfg1)); | |
633 | ugeth_info("maccfg2 : addr - 0x%08x, val - 0x%08x", | |
634 | (u32) & ugeth->ug_regs->maccfg2, | |
635 | in_be32(&ugeth->ug_regs->maccfg2)); | |
636 | ugeth_info("ipgifg : addr - 0x%08x, val - 0x%08x", | |
637 | (u32) & ugeth->ug_regs->ipgifg, | |
638 | in_be32(&ugeth->ug_regs->ipgifg)); | |
639 | ugeth_info("hafdup : addr - 0x%08x, val - 0x%08x", | |
640 | (u32) & ugeth->ug_regs->hafdup, | |
641 | in_be32(&ugeth->ug_regs->hafdup)); | |
ce973b14 LY |
642 | ugeth_info("ifctl : addr - 0x%08x, val - 0x%08x", |
643 | (u32) & ugeth->ug_regs->ifctl, | |
644 | in_be32(&ugeth->ug_regs->ifctl)); | |
645 | ugeth_info("ifstat : addr - 0x%08x, val - 0x%08x", | |
646 | (u32) & ugeth->ug_regs->ifstat, | |
647 | in_be32(&ugeth->ug_regs->ifstat)); | |
648 | ugeth_info("macstnaddr1: addr - 0x%08x, val - 0x%08x", | |
649 | (u32) & ugeth->ug_regs->macstnaddr1, | |
650 | in_be32(&ugeth->ug_regs->macstnaddr1)); | |
651 | ugeth_info("macstnaddr2: addr - 0x%08x, val - 0x%08x", | |
652 | (u32) & ugeth->ug_regs->macstnaddr2, | |
653 | in_be32(&ugeth->ug_regs->macstnaddr2)); | |
654 | ugeth_info("uempr : addr - 0x%08x, val - 0x%08x", | |
655 | (u32) & ugeth->ug_regs->uempr, | |
656 | in_be32(&ugeth->ug_regs->uempr)); | |
657 | ugeth_info("utbipar : addr - 0x%08x, val - 0x%08x", | |
658 | (u32) & ugeth->ug_regs->utbipar, | |
659 | in_be32(&ugeth->ug_regs->utbipar)); | |
660 | ugeth_info("uescr : addr - 0x%08x, val - 0x%04x", | |
661 | (u32) & ugeth->ug_regs->uescr, | |
662 | in_be16(&ugeth->ug_regs->uescr)); | |
663 | ugeth_info("tx64 : addr - 0x%08x, val - 0x%08x", | |
664 | (u32) & ugeth->ug_regs->tx64, | |
665 | in_be32(&ugeth->ug_regs->tx64)); | |
666 | ugeth_info("tx127 : addr - 0x%08x, val - 0x%08x", | |
667 | (u32) & ugeth->ug_regs->tx127, | |
668 | in_be32(&ugeth->ug_regs->tx127)); | |
669 | ugeth_info("tx255 : addr - 0x%08x, val - 0x%08x", | |
670 | (u32) & ugeth->ug_regs->tx255, | |
671 | in_be32(&ugeth->ug_regs->tx255)); | |
672 | ugeth_info("rx64 : addr - 0x%08x, val - 0x%08x", | |
673 | (u32) & ugeth->ug_regs->rx64, | |
674 | in_be32(&ugeth->ug_regs->rx64)); | |
675 | ugeth_info("rx127 : addr - 0x%08x, val - 0x%08x", | |
676 | (u32) & ugeth->ug_regs->rx127, | |
677 | in_be32(&ugeth->ug_regs->rx127)); | |
678 | ugeth_info("rx255 : addr - 0x%08x, val - 0x%08x", | |
679 | (u32) & ugeth->ug_regs->rx255, | |
680 | in_be32(&ugeth->ug_regs->rx255)); | |
681 | ugeth_info("txok : addr - 0x%08x, val - 0x%08x", | |
682 | (u32) & ugeth->ug_regs->txok, | |
683 | in_be32(&ugeth->ug_regs->txok)); | |
684 | ugeth_info("txcf : addr - 0x%08x, val - 0x%04x", | |
685 | (u32) & ugeth->ug_regs->txcf, | |
686 | in_be16(&ugeth->ug_regs->txcf)); | |
687 | ugeth_info("tmca : addr - 0x%08x, val - 0x%08x", | |
688 | (u32) & ugeth->ug_regs->tmca, | |
689 | in_be32(&ugeth->ug_regs->tmca)); | |
690 | ugeth_info("tbca : addr - 0x%08x, val - 0x%08x", | |
691 | (u32) & ugeth->ug_regs->tbca, | |
692 | in_be32(&ugeth->ug_regs->tbca)); | |
693 | ugeth_info("rxfok : addr - 0x%08x, val - 0x%08x", | |
694 | (u32) & ugeth->ug_regs->rxfok, | |
695 | in_be32(&ugeth->ug_regs->rxfok)); | |
696 | ugeth_info("rxbok : addr - 0x%08x, val - 0x%08x", | |
697 | (u32) & ugeth->ug_regs->rxbok, | |
698 | in_be32(&ugeth->ug_regs->rxbok)); | |
699 | ugeth_info("rbyt : addr - 0x%08x, val - 0x%08x", | |
700 | (u32) & ugeth->ug_regs->rbyt, | |
701 | in_be32(&ugeth->ug_regs->rbyt)); | |
702 | ugeth_info("rmca : addr - 0x%08x, val - 0x%08x", | |
703 | (u32) & ugeth->ug_regs->rmca, | |
704 | in_be32(&ugeth->ug_regs->rmca)); | |
705 | ugeth_info("rbca : addr - 0x%08x, val - 0x%08x", | |
706 | (u32) & ugeth->ug_regs->rbca, | |
707 | in_be32(&ugeth->ug_regs->rbca)); | |
708 | ugeth_info("scar : addr - 0x%08x, val - 0x%08x", | |
709 | (u32) & ugeth->ug_regs->scar, | |
710 | in_be32(&ugeth->ug_regs->scar)); | |
711 | ugeth_info("scam : addr - 0x%08x, val - 0x%08x", | |
712 | (u32) & ugeth->ug_regs->scam, | |
713 | in_be32(&ugeth->ug_regs->scam)); | |
714 | ||
715 | if (ugeth->p_thread_data_tx) { | |
716 | int numThreadsTxNumerical; | |
717 | switch (ugeth->ug_info->numThreadsTx) { | |
718 | case UCC_GETH_NUM_OF_THREADS_1: | |
719 | numThreadsTxNumerical = 1; | |
720 | break; | |
721 | case UCC_GETH_NUM_OF_THREADS_2: | |
722 | numThreadsTxNumerical = 2; | |
723 | break; | |
724 | case UCC_GETH_NUM_OF_THREADS_4: | |
725 | numThreadsTxNumerical = 4; | |
726 | break; | |
727 | case UCC_GETH_NUM_OF_THREADS_6: | |
728 | numThreadsTxNumerical = 6; | |
729 | break; | |
730 | case UCC_GETH_NUM_OF_THREADS_8: | |
731 | numThreadsTxNumerical = 8; | |
732 | break; | |
733 | default: | |
734 | numThreadsTxNumerical = 0; | |
735 | break; | |
736 | } | |
737 | ||
738 | ugeth_info("Thread data TXs:"); | |
739 | ugeth_info("Base address: 0x%08x", | |
740 | (u32) ugeth->p_thread_data_tx); | |
741 | for (i = 0; i < numThreadsTxNumerical; i++) { | |
742 | ugeth_info("Thread data TX[%d]:", i); | |
743 | ugeth_info("Base address: 0x%08x", | |
744 | (u32) & ugeth->p_thread_data_tx[i]); | |
745 | mem_disp((u8 *) & ugeth->p_thread_data_tx[i], | |
18a8e864 | 746 | sizeof(struct ucc_geth_thread_data_tx)); |
ce973b14 LY |
747 | } |
748 | } | |
749 | if (ugeth->p_thread_data_rx) { | |
750 | int numThreadsRxNumerical; | |
751 | switch (ugeth->ug_info->numThreadsRx) { | |
752 | case UCC_GETH_NUM_OF_THREADS_1: | |
753 | numThreadsRxNumerical = 1; | |
754 | break; | |
755 | case UCC_GETH_NUM_OF_THREADS_2: | |
756 | numThreadsRxNumerical = 2; | |
757 | break; | |
758 | case UCC_GETH_NUM_OF_THREADS_4: | |
759 | numThreadsRxNumerical = 4; | |
760 | break; | |
761 | case UCC_GETH_NUM_OF_THREADS_6: | |
762 | numThreadsRxNumerical = 6; | |
763 | break; | |
764 | case UCC_GETH_NUM_OF_THREADS_8: | |
765 | numThreadsRxNumerical = 8; | |
766 | break; | |
767 | default: | |
768 | numThreadsRxNumerical = 0; | |
769 | break; | |
770 | } | |
771 | ||
772 | ugeth_info("Thread data RX:"); | |
773 | ugeth_info("Base address: 0x%08x", | |
774 | (u32) ugeth->p_thread_data_rx); | |
775 | for (i = 0; i < numThreadsRxNumerical; i++) { | |
776 | ugeth_info("Thread data RX[%d]:", i); | |
777 | ugeth_info("Base address: 0x%08x", | |
778 | (u32) & ugeth->p_thread_data_rx[i]); | |
779 | mem_disp((u8 *) & ugeth->p_thread_data_rx[i], | |
18a8e864 | 780 | sizeof(struct ucc_geth_thread_data_rx)); |
ce973b14 LY |
781 | } |
782 | } | |
783 | if (ugeth->p_exf_glbl_param) { | |
784 | ugeth_info("EXF global param:"); | |
785 | ugeth_info("Base address: 0x%08x", | |
786 | (u32) ugeth->p_exf_glbl_param); | |
787 | mem_disp((u8 *) ugeth->p_exf_glbl_param, | |
788 | sizeof(*ugeth->p_exf_glbl_param)); | |
789 | } | |
790 | if (ugeth->p_tx_glbl_pram) { | |
791 | ugeth_info("TX global param:"); | |
792 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_tx_glbl_pram); | |
793 | ugeth_info("temoder : addr - 0x%08x, val - 0x%04x", | |
794 | (u32) & ugeth->p_tx_glbl_pram->temoder, | |
795 | in_be16(&ugeth->p_tx_glbl_pram->temoder)); | |
796 | ugeth_info("sqptr : addr - 0x%08x, val - 0x%08x", | |
797 | (u32) & ugeth->p_tx_glbl_pram->sqptr, | |
798 | in_be32(&ugeth->p_tx_glbl_pram->sqptr)); | |
799 | ugeth_info("schedulerbasepointer: addr - 0x%08x, val - 0x%08x", | |
800 | (u32) & ugeth->p_tx_glbl_pram->schedulerbasepointer, | |
801 | in_be32(&ugeth->p_tx_glbl_pram-> | |
802 | schedulerbasepointer)); | |
803 | ugeth_info("txrmonbaseptr: addr - 0x%08x, val - 0x%08x", | |
804 | (u32) & ugeth->p_tx_glbl_pram->txrmonbaseptr, | |
805 | in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr)); | |
806 | ugeth_info("tstate : addr - 0x%08x, val - 0x%08x", | |
807 | (u32) & ugeth->p_tx_glbl_pram->tstate, | |
808 | in_be32(&ugeth->p_tx_glbl_pram->tstate)); | |
809 | ugeth_info("iphoffset[0] : addr - 0x%08x, val - 0x%02x", | |
810 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[0], | |
811 | ugeth->p_tx_glbl_pram->iphoffset[0]); | |
812 | ugeth_info("iphoffset[1] : addr - 0x%08x, val - 0x%02x", | |
813 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[1], | |
814 | ugeth->p_tx_glbl_pram->iphoffset[1]); | |
815 | ugeth_info("iphoffset[2] : addr - 0x%08x, val - 0x%02x", | |
816 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[2], | |
817 | ugeth->p_tx_glbl_pram->iphoffset[2]); | |
818 | ugeth_info("iphoffset[3] : addr - 0x%08x, val - 0x%02x", | |
819 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[3], | |
820 | ugeth->p_tx_glbl_pram->iphoffset[3]); | |
821 | ugeth_info("iphoffset[4] : addr - 0x%08x, val - 0x%02x", | |
822 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[4], | |
823 | ugeth->p_tx_glbl_pram->iphoffset[4]); | |
824 | ugeth_info("iphoffset[5] : addr - 0x%08x, val - 0x%02x", | |
825 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[5], | |
826 | ugeth->p_tx_glbl_pram->iphoffset[5]); | |
827 | ugeth_info("iphoffset[6] : addr - 0x%08x, val - 0x%02x", | |
828 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[6], | |
829 | ugeth->p_tx_glbl_pram->iphoffset[6]); | |
830 | ugeth_info("iphoffset[7] : addr - 0x%08x, val - 0x%02x", | |
831 | (u32) & ugeth->p_tx_glbl_pram->iphoffset[7], | |
832 | ugeth->p_tx_glbl_pram->iphoffset[7]); | |
833 | ugeth_info("vtagtable[0] : addr - 0x%08x, val - 0x%08x", | |
834 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[0], | |
835 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0])); | |
836 | ugeth_info("vtagtable[1] : addr - 0x%08x, val - 0x%08x", | |
837 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[1], | |
838 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1])); | |
839 | ugeth_info("vtagtable[2] : addr - 0x%08x, val - 0x%08x", | |
840 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[2], | |
841 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2])); | |
842 | ugeth_info("vtagtable[3] : addr - 0x%08x, val - 0x%08x", | |
843 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[3], | |
844 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3])); | |
845 | ugeth_info("vtagtable[4] : addr - 0x%08x, val - 0x%08x", | |
846 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[4], | |
847 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4])); | |
848 | ugeth_info("vtagtable[5] : addr - 0x%08x, val - 0x%08x", | |
849 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[5], | |
850 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5])); | |
851 | ugeth_info("vtagtable[6] : addr - 0x%08x, val - 0x%08x", | |
852 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[6], | |
853 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6])); | |
854 | ugeth_info("vtagtable[7] : addr - 0x%08x, val - 0x%08x", | |
855 | (u32) & ugeth->p_tx_glbl_pram->vtagtable[7], | |
856 | in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7])); | |
857 | ugeth_info("tqptr : addr - 0x%08x, val - 0x%08x", | |
858 | (u32) & ugeth->p_tx_glbl_pram->tqptr, | |
859 | in_be32(&ugeth->p_tx_glbl_pram->tqptr)); | |
860 | } | |
861 | if (ugeth->p_rx_glbl_pram) { | |
862 | ugeth_info("RX global param:"); | |
863 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_glbl_pram); | |
864 | ugeth_info("remoder : addr - 0x%08x, val - 0x%08x", | |
865 | (u32) & ugeth->p_rx_glbl_pram->remoder, | |
866 | in_be32(&ugeth->p_rx_glbl_pram->remoder)); | |
867 | ugeth_info("rqptr : addr - 0x%08x, val - 0x%08x", | |
868 | (u32) & ugeth->p_rx_glbl_pram->rqptr, | |
869 | in_be32(&ugeth->p_rx_glbl_pram->rqptr)); | |
870 | ugeth_info("typeorlen : addr - 0x%08x, val - 0x%04x", | |
871 | (u32) & ugeth->p_rx_glbl_pram->typeorlen, | |
872 | in_be16(&ugeth->p_rx_glbl_pram->typeorlen)); | |
873 | ugeth_info("rxgstpack : addr - 0x%08x, val - 0x%02x", | |
874 | (u32) & ugeth->p_rx_glbl_pram->rxgstpack, | |
875 | ugeth->p_rx_glbl_pram->rxgstpack); | |
876 | ugeth_info("rxrmonbaseptr : addr - 0x%08x, val - 0x%08x", | |
877 | (u32) & ugeth->p_rx_glbl_pram->rxrmonbaseptr, | |
878 | in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr)); | |
879 | ugeth_info("intcoalescingptr: addr - 0x%08x, val - 0x%08x", | |
880 | (u32) & ugeth->p_rx_glbl_pram->intcoalescingptr, | |
881 | in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr)); | |
882 | ugeth_info("rstate : addr - 0x%08x, val - 0x%02x", | |
883 | (u32) & ugeth->p_rx_glbl_pram->rstate, | |
884 | ugeth->p_rx_glbl_pram->rstate); | |
885 | ugeth_info("mrblr : addr - 0x%08x, val - 0x%04x", | |
886 | (u32) & ugeth->p_rx_glbl_pram->mrblr, | |
887 | in_be16(&ugeth->p_rx_glbl_pram->mrblr)); | |
888 | ugeth_info("rbdqptr : addr - 0x%08x, val - 0x%08x", | |
889 | (u32) & ugeth->p_rx_glbl_pram->rbdqptr, | |
890 | in_be32(&ugeth->p_rx_glbl_pram->rbdqptr)); | |
891 | ugeth_info("mflr : addr - 0x%08x, val - 0x%04x", | |
892 | (u32) & ugeth->p_rx_glbl_pram->mflr, | |
893 | in_be16(&ugeth->p_rx_glbl_pram->mflr)); | |
894 | ugeth_info("minflr : addr - 0x%08x, val - 0x%04x", | |
895 | (u32) & ugeth->p_rx_glbl_pram->minflr, | |
896 | in_be16(&ugeth->p_rx_glbl_pram->minflr)); | |
897 | ugeth_info("maxd1 : addr - 0x%08x, val - 0x%04x", | |
898 | (u32) & ugeth->p_rx_glbl_pram->maxd1, | |
899 | in_be16(&ugeth->p_rx_glbl_pram->maxd1)); | |
900 | ugeth_info("maxd2 : addr - 0x%08x, val - 0x%04x", | |
901 | (u32) & ugeth->p_rx_glbl_pram->maxd2, | |
902 | in_be16(&ugeth->p_rx_glbl_pram->maxd2)); | |
903 | ugeth_info("ecamptr : addr - 0x%08x, val - 0x%08x", | |
904 | (u32) & ugeth->p_rx_glbl_pram->ecamptr, | |
905 | in_be32(&ugeth->p_rx_glbl_pram->ecamptr)); | |
906 | ugeth_info("l2qt : addr - 0x%08x, val - 0x%08x", | |
907 | (u32) & ugeth->p_rx_glbl_pram->l2qt, | |
908 | in_be32(&ugeth->p_rx_glbl_pram->l2qt)); | |
909 | ugeth_info("l3qt[0] : addr - 0x%08x, val - 0x%08x", | |
910 | (u32) & ugeth->p_rx_glbl_pram->l3qt[0], | |
911 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[0])); | |
912 | ugeth_info("l3qt[1] : addr - 0x%08x, val - 0x%08x", | |
913 | (u32) & ugeth->p_rx_glbl_pram->l3qt[1], | |
914 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[1])); | |
915 | ugeth_info("l3qt[2] : addr - 0x%08x, val - 0x%08x", | |
916 | (u32) & ugeth->p_rx_glbl_pram->l3qt[2], | |
917 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[2])); | |
918 | ugeth_info("l3qt[3] : addr - 0x%08x, val - 0x%08x", | |
919 | (u32) & ugeth->p_rx_glbl_pram->l3qt[3], | |
920 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[3])); | |
921 | ugeth_info("l3qt[4] : addr - 0x%08x, val - 0x%08x", | |
922 | (u32) & ugeth->p_rx_glbl_pram->l3qt[4], | |
923 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[4])); | |
924 | ugeth_info("l3qt[5] : addr - 0x%08x, val - 0x%08x", | |
925 | (u32) & ugeth->p_rx_glbl_pram->l3qt[5], | |
926 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[5])); | |
927 | ugeth_info("l3qt[6] : addr - 0x%08x, val - 0x%08x", | |
928 | (u32) & ugeth->p_rx_glbl_pram->l3qt[6], | |
929 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[6])); | |
930 | ugeth_info("l3qt[7] : addr - 0x%08x, val - 0x%08x", | |
931 | (u32) & ugeth->p_rx_glbl_pram->l3qt[7], | |
932 | in_be32(&ugeth->p_rx_glbl_pram->l3qt[7])); | |
933 | ugeth_info("vlantype : addr - 0x%08x, val - 0x%04x", | |
934 | (u32) & ugeth->p_rx_glbl_pram->vlantype, | |
935 | in_be16(&ugeth->p_rx_glbl_pram->vlantype)); | |
936 | ugeth_info("vlantci : addr - 0x%08x, val - 0x%04x", | |
937 | (u32) & ugeth->p_rx_glbl_pram->vlantci, | |
938 | in_be16(&ugeth->p_rx_glbl_pram->vlantci)); | |
939 | for (i = 0; i < 64; i++) | |
940 | ugeth_info | |
941 | ("addressfiltering[%d]: addr - 0x%08x, val - 0x%02x", | |
942 | i, | |
943 | (u32) & ugeth->p_rx_glbl_pram->addressfiltering[i], | |
944 | ugeth->p_rx_glbl_pram->addressfiltering[i]); | |
945 | ugeth_info("exfGlobalParam : addr - 0x%08x, val - 0x%08x", | |
946 | (u32) & ugeth->p_rx_glbl_pram->exfGlobalParam, | |
947 | in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam)); | |
948 | } | |
949 | if (ugeth->p_send_q_mem_reg) { | |
950 | ugeth_info("Send Q memory registers:"); | |
951 | ugeth_info("Base address: 0x%08x", | |
952 | (u32) ugeth->p_send_q_mem_reg); | |
953 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | |
954 | ugeth_info("SQQD[%d]:", i); | |
955 | ugeth_info("Base address: 0x%08x", | |
956 | (u32) & ugeth->p_send_q_mem_reg->sqqd[i]); | |
957 | mem_disp((u8 *) & ugeth->p_send_q_mem_reg->sqqd[i], | |
18a8e864 | 958 | sizeof(struct ucc_geth_send_queue_qd)); |
ce973b14 LY |
959 | } |
960 | } | |
961 | if (ugeth->p_scheduler) { | |
962 | ugeth_info("Scheduler:"); | |
963 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_scheduler); | |
964 | mem_disp((u8 *) ugeth->p_scheduler, | |
965 | sizeof(*ugeth->p_scheduler)); | |
966 | } | |
967 | if (ugeth->p_tx_fw_statistics_pram) { | |
968 | ugeth_info("TX FW statistics pram:"); | |
969 | ugeth_info("Base address: 0x%08x", | |
970 | (u32) ugeth->p_tx_fw_statistics_pram); | |
971 | mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram, | |
972 | sizeof(*ugeth->p_tx_fw_statistics_pram)); | |
973 | } | |
974 | if (ugeth->p_rx_fw_statistics_pram) { | |
975 | ugeth_info("RX FW statistics pram:"); | |
976 | ugeth_info("Base address: 0x%08x", | |
977 | (u32) ugeth->p_rx_fw_statistics_pram); | |
978 | mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram, | |
979 | sizeof(*ugeth->p_rx_fw_statistics_pram)); | |
980 | } | |
981 | if (ugeth->p_rx_irq_coalescing_tbl) { | |
982 | ugeth_info("RX IRQ coalescing tables:"); | |
983 | ugeth_info("Base address: 0x%08x", | |
984 | (u32) ugeth->p_rx_irq_coalescing_tbl); | |
985 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | |
986 | ugeth_info("RX IRQ coalescing table entry[%d]:", i); | |
987 | ugeth_info("Base address: 0x%08x", | |
988 | (u32) & ugeth->p_rx_irq_coalescing_tbl-> | |
989 | coalescingentry[i]); | |
990 | ugeth_info | |
991 | ("interruptcoalescingmaxvalue: addr - 0x%08x, val - 0x%08x", | |
992 | (u32) & ugeth->p_rx_irq_coalescing_tbl-> | |
993 | coalescingentry[i].interruptcoalescingmaxvalue, | |
994 | in_be32(&ugeth->p_rx_irq_coalescing_tbl-> | |
995 | coalescingentry[i]. | |
996 | interruptcoalescingmaxvalue)); | |
997 | ugeth_info | |
998 | ("interruptcoalescingcounter : addr - 0x%08x, val - 0x%08x", | |
999 | (u32) & ugeth->p_rx_irq_coalescing_tbl-> | |
1000 | coalescingentry[i].interruptcoalescingcounter, | |
1001 | in_be32(&ugeth->p_rx_irq_coalescing_tbl-> | |
1002 | coalescingentry[i]. | |
1003 | interruptcoalescingcounter)); | |
1004 | } | |
1005 | } | |
1006 | if (ugeth->p_rx_bd_qs_tbl) { | |
1007 | ugeth_info("RX BD QS tables:"); | |
1008 | ugeth_info("Base address: 0x%08x", (u32) ugeth->p_rx_bd_qs_tbl); | |
1009 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | |
1010 | ugeth_info("RX BD QS table[%d]:", i); | |
1011 | ugeth_info("Base address: 0x%08x", | |
1012 | (u32) & ugeth->p_rx_bd_qs_tbl[i]); | |
1013 | ugeth_info | |
1014 | ("bdbaseptr : addr - 0x%08x, val - 0x%08x", | |
1015 | (u32) & ugeth->p_rx_bd_qs_tbl[i].bdbaseptr, | |
1016 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr)); | |
1017 | ugeth_info | |
1018 | ("bdptr : addr - 0x%08x, val - 0x%08x", | |
1019 | (u32) & ugeth->p_rx_bd_qs_tbl[i].bdptr, | |
1020 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr)); | |
1021 | ugeth_info | |
1022 | ("externalbdbaseptr: addr - 0x%08x, val - 0x%08x", | |
1023 | (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | |
1024 | in_be32(&ugeth->p_rx_bd_qs_tbl[i]. | |
1025 | externalbdbaseptr)); | |
1026 | ugeth_info | |
1027 | ("externalbdptr : addr - 0x%08x, val - 0x%08x", | |
1028 | (u32) & ugeth->p_rx_bd_qs_tbl[i].externalbdptr, | |
1029 | in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr)); | |
1030 | ugeth_info("ucode RX Prefetched BDs:"); | |
1031 | ugeth_info("Base address: 0x%08x", | |
1032 | (u32) | |
1033 | qe_muram_addr(in_be32 | |
1034 | (&ugeth->p_rx_bd_qs_tbl[i]. | |
1035 | bdbaseptr))); | |
1036 | mem_disp((u8 *) | |
1037 | qe_muram_addr(in_be32 | |
1038 | (&ugeth->p_rx_bd_qs_tbl[i]. | |
1039 | bdbaseptr)), | |
18a8e864 | 1040 | sizeof(struct ucc_geth_rx_prefetched_bds)); |
ce973b14 LY |
1041 | } |
1042 | } | |
1043 | if (ugeth->p_init_enet_param_shadow) { | |
1044 | int size; | |
1045 | ugeth_info("Init enet param shadow:"); | |
1046 | ugeth_info("Base address: 0x%08x", | |
1047 | (u32) ugeth->p_init_enet_param_shadow); | |
1048 | mem_disp((u8 *) ugeth->p_init_enet_param_shadow, | |
1049 | sizeof(*ugeth->p_init_enet_param_shadow)); | |
1050 | ||
18a8e864 | 1051 | size = sizeof(struct ucc_geth_thread_rx_pram); |
ce973b14 LY |
1052 | if (ugeth->ug_info->rxExtendedFiltering) { |
1053 | size += | |
1054 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; | |
1055 | if (ugeth->ug_info->largestexternallookupkeysize == | |
1056 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) | |
1057 | size += | |
1058 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; | |
1059 | if (ugeth->ug_info->largestexternallookupkeysize == | |
1060 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) | |
1061 | size += | |
1062 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; | |
1063 | } | |
1064 | ||
1065 | dump_init_enet_entries(ugeth, | |
1066 | &(ugeth->p_init_enet_param_shadow-> | |
1067 | txthread[0]), | |
1068 | ENET_INIT_PARAM_MAX_ENTRIES_TX, | |
18a8e864 | 1069 | sizeof(struct ucc_geth_thread_tx_pram), |
ce973b14 LY |
1070 | ugeth->ug_info->riscTx, 0); |
1071 | dump_init_enet_entries(ugeth, | |
1072 | &(ugeth->p_init_enet_param_shadow-> | |
1073 | rxthread[0]), | |
1074 | ENET_INIT_PARAM_MAX_ENTRIES_RX, size, | |
1075 | ugeth->ug_info->riscRx, 1); | |
1076 | } | |
1077 | } | |
1078 | #endif /* DEBUG */ | |
1079 | ||
6fee40e9 AF |
1080 | static void init_default_reg_vals(u32 __iomem *upsmr_register, |
1081 | u32 __iomem *maccfg1_register, | |
1082 | u32 __iomem *maccfg2_register) | |
ce973b14 LY |
1083 | { |
1084 | out_be32(upsmr_register, UCC_GETH_UPSMR_INIT); | |
1085 | out_be32(maccfg1_register, UCC_GETH_MACCFG1_INIT); | |
1086 | out_be32(maccfg2_register, UCC_GETH_MACCFG2_INIT); | |
1087 | } | |
1088 | ||
1089 | static int init_half_duplex_params(int alt_beb, | |
1090 | int back_pressure_no_backoff, | |
1091 | int no_backoff, | |
1092 | int excess_defer, | |
1093 | u8 alt_beb_truncation, | |
1094 | u8 max_retransmissions, | |
1095 | u8 collision_window, | |
6fee40e9 | 1096 | u32 __iomem *hafdup_register) |
ce973b14 LY |
1097 | { |
1098 | u32 value = 0; | |
1099 | ||
1100 | if ((alt_beb_truncation > HALFDUP_ALT_BEB_TRUNCATION_MAX) || | |
1101 | (max_retransmissions > HALFDUP_MAX_RETRANSMISSION_MAX) || | |
1102 | (collision_window > HALFDUP_COLLISION_WINDOW_MAX)) | |
1103 | return -EINVAL; | |
1104 | ||
1105 | value = (u32) (alt_beb_truncation << HALFDUP_ALT_BEB_TRUNCATION_SHIFT); | |
1106 | ||
1107 | if (alt_beb) | |
1108 | value |= HALFDUP_ALT_BEB; | |
1109 | if (back_pressure_no_backoff) | |
1110 | value |= HALFDUP_BACK_PRESSURE_NO_BACKOFF; | |
1111 | if (no_backoff) | |
1112 | value |= HALFDUP_NO_BACKOFF; | |
1113 | if (excess_defer) | |
1114 | value |= HALFDUP_EXCESSIVE_DEFER; | |
1115 | ||
1116 | value |= (max_retransmissions << HALFDUP_MAX_RETRANSMISSION_SHIFT); | |
1117 | ||
1118 | value |= collision_window; | |
1119 | ||
1120 | out_be32(hafdup_register, value); | |
1121 | return 0; | |
1122 | } | |
1123 | ||
1124 | static int init_inter_frame_gap_params(u8 non_btb_cs_ipg, | |
1125 | u8 non_btb_ipg, | |
1126 | u8 min_ifg, | |
1127 | u8 btb_ipg, | |
6fee40e9 | 1128 | u32 __iomem *ipgifg_register) |
ce973b14 LY |
1129 | { |
1130 | u32 value = 0; | |
1131 | ||
1132 | /* Non-Back-to-back IPG part 1 should be <= Non-Back-to-back | |
1133 | IPG part 2 */ | |
1134 | if (non_btb_cs_ipg > non_btb_ipg) | |
1135 | return -EINVAL; | |
1136 | ||
1137 | if ((non_btb_cs_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART1_MAX) || | |
1138 | (non_btb_ipg > IPGIFG_NON_BACK_TO_BACK_IFG_PART2_MAX) || | |
1139 | /*(min_ifg > IPGIFG_MINIMUM_IFG_ENFORCEMENT_MAX) || */ | |
1140 | (btb_ipg > IPGIFG_BACK_TO_BACK_IFG_MAX)) | |
1141 | return -EINVAL; | |
1142 | ||
1143 | value |= | |
1144 | ((non_btb_cs_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART1_SHIFT) & | |
1145 | IPGIFG_NBTB_CS_IPG_MASK); | |
1146 | value |= | |
1147 | ((non_btb_ipg << IPGIFG_NON_BACK_TO_BACK_IFG_PART2_SHIFT) & | |
1148 | IPGIFG_NBTB_IPG_MASK); | |
1149 | value |= | |
1150 | ((min_ifg << IPGIFG_MINIMUM_IFG_ENFORCEMENT_SHIFT) & | |
1151 | IPGIFG_MIN_IFG_MASK); | |
1152 | value |= (btb_ipg & IPGIFG_BTB_IPG_MASK); | |
1153 | ||
1154 | out_be32(ipgifg_register, value); | |
1155 | return 0; | |
1156 | } | |
1157 | ||
ac421852 | 1158 | int init_flow_control_params(u32 automatic_flow_control_mode, |
ce973b14 LY |
1159 | int rx_flow_control_enable, |
1160 | int tx_flow_control_enable, | |
1161 | u16 pause_period, | |
1162 | u16 extension_field, | |
6fee40e9 AF |
1163 | u32 __iomem *upsmr_register, |
1164 | u32 __iomem *uempr_register, | |
1165 | u32 __iomem *maccfg1_register) | |
ce973b14 LY |
1166 | { |
1167 | u32 value = 0; | |
1168 | ||
1169 | /* Set UEMPR register */ | |
1170 | value = (u32) pause_period << UEMPR_PAUSE_TIME_VALUE_SHIFT; | |
1171 | value |= (u32) extension_field << UEMPR_EXTENDED_PAUSE_TIME_VALUE_SHIFT; | |
1172 | out_be32(uempr_register, value); | |
1173 | ||
1174 | /* Set UPSMR register */ | |
3bc53427 | 1175 | setbits32(upsmr_register, automatic_flow_control_mode); |
ce973b14 LY |
1176 | |
1177 | value = in_be32(maccfg1_register); | |
1178 | if (rx_flow_control_enable) | |
1179 | value |= MACCFG1_FLOW_RX; | |
1180 | if (tx_flow_control_enable) | |
1181 | value |= MACCFG1_FLOW_TX; | |
1182 | out_be32(maccfg1_register, value); | |
1183 | ||
1184 | return 0; | |
1185 | } | |
1186 | ||
1187 | static int init_hw_statistics_gathering_mode(int enable_hardware_statistics, | |
1188 | int auto_zero_hardware_statistics, | |
6fee40e9 AF |
1189 | u32 __iomem *upsmr_register, |
1190 | u16 __iomem *uescr_register) | |
ce973b14 | 1191 | { |
ce973b14 | 1192 | u16 uescr_value = 0; |
3bc53427 | 1193 | |
ce973b14 | 1194 | /* Enable hardware statistics gathering if requested */ |
3bc53427 TT |
1195 | if (enable_hardware_statistics) |
1196 | setbits32(upsmr_register, UCC_GETH_UPSMR_HSE); | |
ce973b14 LY |
1197 | |
1198 | /* Clear hardware statistics counters */ | |
1199 | uescr_value = in_be16(uescr_register); | |
1200 | uescr_value |= UESCR_CLRCNT; | |
1201 | /* Automatically zero hardware statistics counters on read, | |
1202 | if requested */ | |
1203 | if (auto_zero_hardware_statistics) | |
1204 | uescr_value |= UESCR_AUTOZ; | |
1205 | out_be16(uescr_register, uescr_value); | |
1206 | ||
1207 | return 0; | |
1208 | } | |
1209 | ||
1210 | static int init_firmware_statistics_gathering_mode(int | |
1211 | enable_tx_firmware_statistics, | |
1212 | int enable_rx_firmware_statistics, | |
6fee40e9 | 1213 | u32 __iomem *tx_rmon_base_ptr, |
ce973b14 | 1214 | u32 tx_firmware_statistics_structure_address, |
6fee40e9 | 1215 | u32 __iomem *rx_rmon_base_ptr, |
ce973b14 | 1216 | u32 rx_firmware_statistics_structure_address, |
6fee40e9 AF |
1217 | u16 __iomem *temoder_register, |
1218 | u32 __iomem *remoder_register) | |
ce973b14 LY |
1219 | { |
1220 | /* Note: this function does not check if */ | |
1221 | /* the parameters it receives are NULL */ | |
ce973b14 LY |
1222 | |
1223 | if (enable_tx_firmware_statistics) { | |
1224 | out_be32(tx_rmon_base_ptr, | |
1225 | tx_firmware_statistics_structure_address); | |
3bc53427 | 1226 | setbits16(temoder_register, TEMODER_TX_RMON_STATISTICS_ENABLE); |
ce973b14 LY |
1227 | } |
1228 | ||
1229 | if (enable_rx_firmware_statistics) { | |
1230 | out_be32(rx_rmon_base_ptr, | |
1231 | rx_firmware_statistics_structure_address); | |
3bc53427 | 1232 | setbits32(remoder_register, REMODER_RX_RMON_STATISTICS_ENABLE); |
ce973b14 LY |
1233 | } |
1234 | ||
1235 | return 0; | |
1236 | } | |
1237 | ||
1238 | static int init_mac_station_addr_regs(u8 address_byte_0, | |
1239 | u8 address_byte_1, | |
1240 | u8 address_byte_2, | |
1241 | u8 address_byte_3, | |
1242 | u8 address_byte_4, | |
1243 | u8 address_byte_5, | |
6fee40e9 AF |
1244 | u32 __iomem *macstnaddr1_register, |
1245 | u32 __iomem *macstnaddr2_register) | |
ce973b14 LY |
1246 | { |
1247 | u32 value = 0; | |
1248 | ||
1249 | /* Example: for a station address of 0x12345678ABCD, */ | |
1250 | /* 0x12 is byte 0, 0x34 is byte 1 and so on and 0xCD is byte 5 */ | |
1251 | ||
1252 | /* MACSTNADDR1 Register: */ | |
1253 | ||
1254 | /* 0 7 8 15 */ | |
1255 | /* station address byte 5 station address byte 4 */ | |
1256 | /* 16 23 24 31 */ | |
1257 | /* station address byte 3 station address byte 2 */ | |
1258 | value |= (u32) ((address_byte_2 << 0) & 0x000000FF); | |
1259 | value |= (u32) ((address_byte_3 << 8) & 0x0000FF00); | |
1260 | value |= (u32) ((address_byte_4 << 16) & 0x00FF0000); | |
1261 | value |= (u32) ((address_byte_5 << 24) & 0xFF000000); | |
1262 | ||
1263 | out_be32(macstnaddr1_register, value); | |
1264 | ||
1265 | /* MACSTNADDR2 Register: */ | |
1266 | ||
1267 | /* 0 7 8 15 */ | |
1268 | /* station address byte 1 station address byte 0 */ | |
1269 | /* 16 23 24 31 */ | |
1270 | /* reserved reserved */ | |
1271 | value = 0; | |
1272 | value |= (u32) ((address_byte_0 << 16) & 0x00FF0000); | |
1273 | value |= (u32) ((address_byte_1 << 24) & 0xFF000000); | |
1274 | ||
1275 | out_be32(macstnaddr2_register, value); | |
1276 | ||
1277 | return 0; | |
1278 | } | |
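/* Worked example (illustrative), continuing the 0x12345678ABCD case
 * described in the comments above: with byte 0 = 0x12 ... byte 5 = 0xCD,
 * the code writes MACSTNADDR1 = 0xCDAB7856 and MACSTNADDR2 = 0x34120000.
 */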
1279 | ||
ce973b14 | 1280 | static int init_check_frame_length_mode(int length_check, |
6fee40e9 | 1281 | u32 __iomem *maccfg2_register) |
ce973b14 LY |
1282 | { |
1283 | u32 value = 0; | |
1284 | ||
1285 | value = in_be32(maccfg2_register); | |
1286 | ||
1287 | if (length_check) | |
1288 | value |= MACCFG2_LC; | |
1289 | else | |
1290 | value &= ~MACCFG2_LC; | |
1291 | ||
1292 | out_be32(maccfg2_register, value); | |
1293 | return 0; | |
1294 | } | |
1295 | ||
1296 | static int init_preamble_length(u8 preamble_length, | |
6fee40e9 | 1297 | u32 __iomem *maccfg2_register) |
ce973b14 | 1298 | { |
ce973b14 LY |
1299 | if ((preamble_length < 3) || (preamble_length > 7)) |
1300 | return -EINVAL; | |
1301 | ||
3bc53427 TT |
1302 | clrsetbits_be32(maccfg2_register, MACCFG2_PREL_MASK, |
1303 | preamble_length << MACCFG2_PREL_SHIFT); | |
1304 | ||
ce973b14 LY |
1305 | return 0; |
1306 | } | |
1307 | ||
ce973b14 LY |
1308 | static int init_rx_parameters(int reject_broadcast, |
1309 | int receive_short_frames, | |
6fee40e9 | 1310 | int promiscuous, u32 __iomem *upsmr_register) |
ce973b14 LY |
1311 | { |
1312 | u32 value = 0; | |
1313 | ||
1314 | value = in_be32(upsmr_register); | |
1315 | ||
1316 | if (reject_broadcast) | |
3bc53427 | 1317 | value |= UCC_GETH_UPSMR_BRO; |
ce973b14 | 1318 | else |
3bc53427 | 1319 | value &= ~UCC_GETH_UPSMR_BRO; |
ce973b14 LY |
1320 | |
1321 | if (receive_short_frames) | |
3bc53427 | 1322 | value |= UCC_GETH_UPSMR_RSH; |
ce973b14 | 1323 | else |
3bc53427 | 1324 | value &= ~UCC_GETH_UPSMR_RSH; |
ce973b14 LY |
1325 | |
1326 | if (promiscuous) | |
3bc53427 | 1327 | value |= UCC_GETH_UPSMR_PRO; |
ce973b14 | 1328 | else |
3bc53427 | 1329 | value &= ~UCC_GETH_UPSMR_PRO; |
ce973b14 LY |
1330 | |
1331 | out_be32(upsmr_register, value); | |
1332 | ||
1333 | return 0; | |
1334 | } | |
1335 | ||
1336 | static int init_max_rx_buff_len(u16 max_rx_buf_len, | |
6fee40e9 | 1337 | u16 __iomem *mrblr_register) |
ce973b14 LY |
1338 | { |
1339 | /* max_rx_buf_len value must be a multiple of 128 */ | |
1340 | if ((max_rx_buf_len == 0) | |
1341 | || (max_rx_buf_len % UCC_GETH_MRBLR_ALIGNMENT)) | |
1342 | return -EINVAL; | |
1343 | ||
1344 | out_be16(mrblr_register, max_rx_buf_len); | |
1345 | return 0; | |
1346 | } | |
1347 | ||
1348 | static int init_min_frame_len(u16 min_frame_length, | |
6fee40e9 AF |
1349 | u16 __iomem *minflr_register, |
1350 | u16 __iomem *mrblr_register) | |
ce973b14 LY |
1351 | { |
1352 | u16 mrblr_value = 0; | |
1353 | ||
1354 | mrblr_value = in_be16(mrblr_register); | |
1355 | if (min_frame_length >= (mrblr_value - 4)) | |
1356 | return -EINVAL; | |
1357 | ||
1358 | out_be16(minflr_register, min_frame_length); | |
1359 | return 0; | |
1360 | } | |
1361 | ||
18a8e864 | 1362 | static int adjust_enet_interface(struct ucc_geth_private *ugeth) |
ce973b14 | 1363 | { |
18a8e864 | 1364 | struct ucc_geth_info *ug_info; |
6fee40e9 AF |
1365 | struct ucc_geth __iomem *ug_regs; |
1366 | struct ucc_fast __iomem *uf_regs; | |
728de4c9 KP |
1367 | int ret_val; |
1368 | u32 upsmr, maccfg2, tbiBaseAddress; | |
ce973b14 LY |
1369 | u16 value; |
1370 | ||
b39d66a8 | 1371 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 LY |
1372 | |
1373 | ug_info = ugeth->ug_info; | |
1374 | ug_regs = ugeth->ug_regs; | |
1375 | uf_regs = ugeth->uccf->uf_regs; | |
1376 | ||
ce973b14 LY |
1377 | /* Set MACCFG2 */ |
1378 | maccfg2 = in_be32(&ug_regs->maccfg2); | |
1379 | maccfg2 &= ~MACCFG2_INTERFACE_MODE_MASK; | |
728de4c9 KP |
1380 | if ((ugeth->max_speed == SPEED_10) || |
1381 | (ugeth->max_speed == SPEED_100)) | |
ce973b14 | 1382 | maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE; |
728de4c9 | 1383 | else if (ugeth->max_speed == SPEED_1000) |
ce973b14 LY |
1384 | maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE; |
1385 | maccfg2 |= ug_info->padAndCrc; | |
1386 | out_be32(&ug_regs->maccfg2, maccfg2); | |
1387 | ||
1388 | /* Set UPSMR */ | |
1389 | upsmr = in_be32(&uf_regs->upsmr); | |
3bc53427 TT |
1390 | upsmr &= ~(UCC_GETH_UPSMR_RPM | UCC_GETH_UPSMR_R10M | |
1391 | UCC_GETH_UPSMR_TBIM | UCC_GETH_UPSMR_RMM); | |
728de4c9 KP |
1392 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || |
1393 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || | |
1394 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || | |
bd0ceaab KP |
1395 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1396 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | |
728de4c9 | 1397 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
3bc53427 | 1398 | upsmr |= UCC_GETH_UPSMR_RPM; |
728de4c9 KP |
1399 | switch (ugeth->max_speed) { |
1400 | case SPEED_10: | |
3bc53427 | 1401 | upsmr |= UCC_GETH_UPSMR_R10M; |
728de4c9 KP |
1402 | /* FALLTHROUGH */ |
1403 | case SPEED_100: | |
1404 | if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI) | |
3bc53427 | 1405 | upsmr |= UCC_GETH_UPSMR_RMM; |
728de4c9 KP |
1406 | } |
1407 | } | |
1408 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || | |
1409 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | |
3bc53427 | 1410 | upsmr |= UCC_GETH_UPSMR_TBIM; |
728de4c9 | 1411 | } |
ce973b14 LY |
1412 | out_be32(&uf_regs->upsmr, upsmr); |
1413 | ||
ce973b14 LY |
1414 | /* Disable autonegotiation in tbi mode, because by default it |
1415 | comes up in autonegotiation mode. */ | |
1416 | /* Note that this depends on the UTBIPAR register being set up properly. */ | |
728de4c9 KP |
1417 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) || |
1418 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { | |
ce973b14 LY |
1419 | tbiBaseAddress = in_be32(&ug_regs->utbipar); |
1420 | tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK; | |
1421 | tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT; | |
728de4c9 KP |
1422 | value = ugeth->phydev->bus->read(ugeth->phydev->bus, |
1423 | (u8) tbiBaseAddress, ENET_TBI_MII_CR); | |
ce973b14 | 1424 | value &= ~0x1000; /* Turn off autonegotiation */ |
728de4c9 KP |
1425 | ugeth->phydev->bus->write(ugeth->phydev->bus, |
1426 | (u8) tbiBaseAddress, ENET_TBI_MII_CR, value); | |
ce973b14 LY |
1427 | } |
1428 | ||
1429 | init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2); | |
1430 | ||
1431 | ret_val = init_preamble_length(ug_info->prel, &ug_regs->maccfg2); | |
1432 | if (ret_val != 0) { | |
890de95e LY |
1433 | if (netif_msg_probe(ugeth)) |
1434 | ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", | |
b39d66a8 | 1435 | __func__); |
ce973b14 LY |
1436 | return ret_val; |
1437 | } | |
1438 | ||
1439 | return 0; | |
1440 | } | |
1441 | ||
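
The six-way phy_interface test in adjust_enet_interface() above (and repeated in adjust_link() below) simply asks whether the interface is one of the reduced-pin-count modes that need UPSMR.RPM set. A standalone sketch of that predicate, using stand-in enum values rather than the kernel's phy_interface_t:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel's PHY_INTERFACE_MODE_* values (illustration only). */
enum phy_if_mode {
	IF_MII, IF_RMII, IF_GMII, IF_RGMII, IF_RGMII_ID,
	IF_RGMII_RXID, IF_RGMII_TXID, IF_TBI, IF_RTBI,
};

/* True for the reduced-pin-count modes that need UPSMR.RPM, per the code above. */
static bool is_reduced_mode(enum phy_if_mode mode)
{
	switch (mode) {
	case IF_RMII:
	case IF_RGMII:
	case IF_RGMII_ID:
	case IF_RGMII_RXID:
	case IF_RGMII_TXID:
	case IF_RTBI:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("RGMII reduced? %d\n", is_reduced_mode(IF_RGMII));	/* 1 */
	printf("TBI   reduced? %d\n", is_reduced_mode(IF_TBI));		/* 0 */
	return 0;
}
```
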
1442 | /* Called every time the controller might need to be made | |
1443 | * aware of new link state. The PHY code conveys this | |
1444 | * information through variables in the ugeth structure; this | |
1445 | * function converts those variables into the appropriate | |
1446 | * register values and can bring the device down if needed. | |
1447 | */ | |
728de4c9 | 1448 | |
ce973b14 LY |
1449 | static void adjust_link(struct net_device *dev) |
1450 | { | |
18a8e864 | 1451 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
6fee40e9 AF |
1452 | struct ucc_geth __iomem *ug_regs; |
1453 | struct ucc_fast __iomem *uf_regs; | |
728de4c9 KP |
1454 | struct phy_device *phydev = ugeth->phydev; |
1455 | unsigned long flags; | |
1456 | int new_state = 0; | |
ce973b14 LY |
1457 | |
1458 | ug_regs = ugeth->ug_regs; | |
728de4c9 | 1459 | uf_regs = ugeth->uccf->uf_regs; |
ce973b14 | 1460 | |
728de4c9 KP |
1461 | spin_lock_irqsave(&ugeth->lock, flags); |
1462 | ||
1463 | if (phydev->link) { | |
1464 | u32 tempval = in_be32(&ug_regs->maccfg2); | |
1465 | u32 upsmr = in_be32(&uf_regs->upsmr); | |
ce973b14 LY |
1466 | /* Now we make sure that we can be in full duplex mode. |
1467 | * If not, we operate in half-duplex mode. */ | |
728de4c9 KP |
1468 | if (phydev->duplex != ugeth->oldduplex) { |
1469 | new_state = 1; | |
1470 | if (!(phydev->duplex)) | |
ce973b14 | 1471 | tempval &= ~(MACCFG2_FDX); |
728de4c9 | 1472 | else |
ce973b14 | 1473 | tempval |= MACCFG2_FDX; |
728de4c9 | 1474 | ugeth->oldduplex = phydev->duplex; |
ce973b14 LY |
1475 | } |
1476 | ||
728de4c9 KP |
1477 | if (phydev->speed != ugeth->oldspeed) { |
1478 | new_state = 1; | |
1479 | switch (phydev->speed) { | |
1480 | case SPEED_1000: | |
1481 | tempval = ((tempval & | |
1482 | ~(MACCFG2_INTERFACE_MODE_MASK)) | | |
1483 | MACCFG2_INTERFACE_MODE_BYTE); | |
a1862a53 | 1484 | break; |
728de4c9 KP |
1485 | case SPEED_100: |
1486 | case SPEED_10: | |
1487 | tempval = ((tempval & | |
1488 | ~(MACCFG2_INTERFACE_MODE_MASK)) | | |
1489 | MACCFG2_INTERFACE_MODE_NIBBLE); | |
1490 | /* if reduced mode, re-set UPSMR.R10M */ | |
1491 | if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) || | |
1492 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) || | |
1493 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) || | |
bd0ceaab KP |
1494 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) || |
1495 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) || | |
728de4c9 KP |
1496 | (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) { |
1497 | if (phydev->speed == SPEED_10) | |
3bc53427 | 1498 | upsmr |= UCC_GETH_UPSMR_R10M; |
728de4c9 | 1499 | else |
3bc53427 | 1500 | upsmr &= ~UCC_GETH_UPSMR_R10M; |
728de4c9 | 1501 | } |
ce973b14 LY |
1502 | break; |
1503 | default: | |
728de4c9 KP |
1504 | if (netif_msg_link(ugeth)) |
1505 | ugeth_warn( | |
1506 | "%s: Ack! Speed (%d) is not 10/100/1000!", | |
1507 | dev->name, phydev->speed); | |
ce973b14 LY |
1508 | break; |
1509 | } | |
728de4c9 | 1510 | ugeth->oldspeed = phydev->speed; |
ce973b14 LY |
1511 | } |
1512 | ||
728de4c9 KP |
1513 | out_be32(&ug_regs->maccfg2, tempval); |
1514 | out_be32(&uf_regs->upsmr, upsmr); | |
1515 | ||
ce973b14 | 1516 | if (!ugeth->oldlink) { |
728de4c9 | 1517 | new_state = 1; |
ce973b14 | 1518 | ugeth->oldlink = 1; |
ce973b14 | 1519 | } |
728de4c9 KP |
1520 | } else if (ugeth->oldlink) { |
1521 | new_state = 1; | |
ce973b14 LY |
1522 | ugeth->oldlink = 0; |
1523 | ugeth->oldspeed = 0; | |
1524 | ugeth->oldduplex = -1; | |
ce973b14 | 1525 | } |
728de4c9 KP |
1526 | |
1527 | if (new_state && netif_msg_link(ugeth)) | |
1528 | phy_print_status(phydev); | |
1529 | ||
1530 | spin_unlock_irqrestore(&ugeth->lock, flags); | |
ce973b14 LY |
1531 | } |
1532 | ||
1533 | /* Configure the PHY for dev. | |
1534 | * Returns 0 on success, or a negative errno on failure. | |
1535 | */ | |
1536 | static int init_phy(struct net_device *dev) | |
1537 | { | |
728de4c9 | 1538 | struct ucc_geth_private *priv = netdev_priv(dev); |
b1c4a9dd HW |
1539 | struct device_node *np = priv->node; |
1540 | struct device_node *phy, *mdio; | |
1541 | const phandle *ph; | |
1542 | char bus_name[MII_BUS_ID_SIZE]; | |
1543 | const unsigned int *id; | |
728de4c9 KP |
1544 | struct phy_device *phydev; |
1545 | char phy_id[BUS_ID_SIZE]; | |
ce973b14 | 1546 | |
728de4c9 KP |
1547 | priv->oldlink = 0; |
1548 | priv->oldspeed = 0; | |
1549 | priv->oldduplex = -1; | |
ce973b14 | 1550 | |
b1c4a9dd HW |
1551 | ph = of_get_property(np, "phy-handle", NULL); |
1552 | phy = of_find_node_by_phandle(*ph); | |
1553 | mdio = of_get_parent(phy); | |
1554 | ||
1555 | id = of_get_property(phy, "reg", NULL); | |
1556 | ||
1557 | of_node_put(phy); | |
1558 | of_node_put(mdio); | |
1559 | ||
1560 | uec_mdio_bus_name(bus_name, mdio); | |
1561 | snprintf(phy_id, sizeof(phy_id), "%s:%02x", | |
1562 | bus_name, *id); | |
ce973b14 | 1563 | |
728de4c9 | 1564 | phydev = phy_connect(dev, phy_id, &adjust_link, 0, priv->phy_interface); |
ce973b14 | 1565 | |
728de4c9 KP |
1566 | if (IS_ERR(phydev)) { |
1567 | printk("%s: Could not attach to PHY\n", dev->name); | |
1568 | return PTR_ERR(phydev); | |
ce973b14 LY |
1569 | } |
1570 | ||
728de4c9 | 1571 | phydev->supported &= (ADVERTISED_10baseT_Half | |
ce973b14 LY |
1572 | ADVERTISED_10baseT_Full | |
1573 | ADVERTISED_100baseT_Half | | |
728de4c9 | 1574 | ADVERTISED_100baseT_Full); |
ce973b14 | 1575 | |
728de4c9 KP |
1576 | if (priv->max_speed == SPEED_1000) |
1577 | phydev->supported |= ADVERTISED_1000baseT_Full; | |
ce973b14 | 1578 | |
728de4c9 | 1579 | phydev->advertising = phydev->supported; |
68dc44af | 1580 | |
728de4c9 | 1581 | priv->phydev = phydev; |
ce973b14 LY |
1582 | |
1583 | return 0; | |
ce973b14 LY |
1584 | } |
1585 | ||
728de4c9 | 1586 | |
ce973b14 | 1587 | |
18a8e864 | 1588 | static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth) |
ce973b14 | 1589 | { |
18a8e864 | 1590 | struct ucc_fast_private *uccf; |
ce973b14 LY |
1591 | u32 cecr_subblock; |
1592 | u32 temp; | |
b3431c64 | 1593 | int i = 10; |
ce973b14 LY |
1594 | |
1595 | uccf = ugeth->uccf; | |
1596 | ||
1597 | /* Mask GRACEFUL STOP TX interrupt bit and clear it */ | |
3bc53427 TT |
1598 | clrbits32(uccf->p_uccm, UCC_GETH_UCCE_GRA); |
1599 | out_be32(uccf->p_ucce, UCC_GETH_UCCE_GRA); /* clear by writing 1 */ | |
ce973b14 LY |
1600 | |
1601 | /* Issue host command */ | |
1602 | cecr_subblock = | |
1603 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
1604 | qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock, | |
18a8e864 | 1605 | QE_CR_PROTOCOL_ETHERNET, 0); |
ce973b14 LY |
1606 | |
1607 | /* Wait for command to complete */ | |
1608 | do { | |
b3431c64 | 1609 | msleep(10); |
ce973b14 | 1610 | temp = in_be32(uccf->p_ucce); |
3bc53427 | 1611 | } while (!(temp & UCC_GETH_UCCE_GRA) && --i); |
ce973b14 LY |
1612 | |
1613 | uccf->stopped_tx = 1; | |
1614 | ||
1615 | return 0; | |
1616 | } | |
1617 | ||
18a8e864 | 1618 | static int ugeth_graceful_stop_rx(struct ucc_geth_private * ugeth) |
ce973b14 | 1619 | { |
18a8e864 | 1620 | struct ucc_fast_private *uccf; |
ce973b14 LY |
1621 | u32 cecr_subblock; |
1622 | u8 temp; | |
b3431c64 | 1623 | int i = 10; |
ce973b14 LY |
1624 | |
1625 | uccf = ugeth->uccf; | |
1626 | ||
1627 | /* Clear acknowledge bit */ | |
6fee40e9 | 1628 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); |
ce973b14 | 1629 | temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX; |
6fee40e9 | 1630 | out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp); |
ce973b14 LY |
1631 | |
1632 | /* Keep issuing command and checking acknowledge bit until | |
1633 | it is asserted, according to spec */ | |
1634 | do { | |
1635 | /* Issue host command */ | |
1636 | cecr_subblock = | |
1637 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info. | |
1638 | ucc_num); | |
1639 | qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock, | |
18a8e864 | 1640 | QE_CR_PROTOCOL_ETHERNET, 0); |
b3431c64 | 1641 | msleep(10); |
6fee40e9 | 1642 | temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack); |
b3431c64 | 1643 | } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i); |
ce973b14 LY |
1644 | |
1645 | uccf->stopped_rx = 1; | |
1646 | ||
1647 | return 0; | |
1648 | } | |
1649 | ||
18a8e864 | 1650 | static int ugeth_restart_tx(struct ucc_geth_private *ugeth) |
ce973b14 | 1651 | { |
18a8e864 | 1652 | struct ucc_fast_private *uccf; |
ce973b14 LY |
1653 | u32 cecr_subblock; |
1654 | ||
1655 | uccf = ugeth->uccf; | |
1656 | ||
1657 | cecr_subblock = | |
1658 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
18a8e864 | 1659 | qe_issue_cmd(QE_RESTART_TX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, 0); |
ce973b14 LY |
1660 | uccf->stopped_tx = 0; |
1661 | ||
1662 | return 0; | |
1663 | } | |
1664 | ||
18a8e864 | 1665 | static int ugeth_restart_rx(struct ucc_geth_private *ugeth) |
ce973b14 | 1666 | { |
18a8e864 | 1667 | struct ucc_fast_private *uccf; |
ce973b14 LY |
1668 | u32 cecr_subblock; |
1669 | ||
1670 | uccf = ugeth->uccf; | |
1671 | ||
1672 | cecr_subblock = | |
1673 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
18a8e864 | 1674 | qe_issue_cmd(QE_RESTART_RX, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, |
ce973b14 LY |
1675 | 0); |
1676 | uccf->stopped_rx = 0; | |
1677 | ||
1678 | return 0; | |
1679 | } | |
1680 | ||
18a8e864 | 1681 | static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode) |
ce973b14 | 1682 | { |
18a8e864 | 1683 | struct ucc_fast_private *uccf; |
ce973b14 LY |
1684 | int enabled_tx, enabled_rx; |
1685 | ||
1686 | uccf = ugeth->uccf; | |
1687 | ||
1688 | /* check if the UCC number is in range. */ | |
1689 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | |
890de95e | 1690 | if (netif_msg_probe(ugeth)) |
b39d66a8 | 1691 | ugeth_err("%s: ucc_num out of range.", __func__); |
ce973b14 LY |
1692 | return -EINVAL; |
1693 | } | |
1694 | ||
1695 | enabled_tx = uccf->enabled_tx; | |
1696 | enabled_rx = uccf->enabled_rx; | |
1697 | ||
1698 | /* Get Tx and Rx going again, in case this channel was actively | |
1699 | disabled. */ | |
1700 | if ((mode & COMM_DIR_TX) && (!enabled_tx) && uccf->stopped_tx) | |
1701 | ugeth_restart_tx(ugeth); | |
1702 | if ((mode & COMM_DIR_RX) && (!enabled_rx) && uccf->stopped_rx) | |
1703 | ugeth_restart_rx(ugeth); | |
1704 | ||
1705 | ucc_fast_enable(uccf, mode); /* OK to do even if not disabled */ | |
1706 | ||
1707 | return 0; | |
1708 | ||
1709 | } | |
1710 | ||
18a8e864 | 1711 | static int ugeth_disable(struct ucc_geth_private * ugeth, enum comm_dir mode) |
ce973b14 | 1712 | { |
18a8e864 | 1713 | struct ucc_fast_private *uccf; |
ce973b14 LY |
1714 | |
1715 | uccf = ugeth->uccf; | |
1716 | ||
1717 | /* check if the UCC number is in range. */ | |
1718 | if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { | |
890de95e | 1719 | if (netif_msg_probe(ugeth)) |
b39d66a8 | 1720 | ugeth_err("%s: ucc_num out of range.", __func__); |
ce973b14 LY |
1721 | return -EINVAL; |
1722 | } | |
1723 | ||
1724 | /* Stop any transmissions */ | |
1725 | if ((mode & COMM_DIR_TX) && uccf->enabled_tx && !uccf->stopped_tx) | |
1726 | ugeth_graceful_stop_tx(ugeth); | |
1727 | ||
1728 | /* Stop any receptions */ | |
1729 | if ((mode & COMM_DIR_RX) && uccf->enabled_rx && !uccf->stopped_rx) | |
1730 | ugeth_graceful_stop_rx(ugeth); | |
1731 | ||
1732 | ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */ | |
1733 | ||
1734 | return 0; | |
1735 | } | |
1736 | ||
18a8e864 | 1737 | static void ugeth_dump_regs(struct ucc_geth_private *ugeth) |
ce973b14 LY |
1738 | { |
1739 | #ifdef DEBUG | |
1740 | ucc_fast_dump_regs(ugeth->uccf); | |
1741 | dump_regs(ugeth); | |
1742 | dump_bds(ugeth); | |
1743 | #endif | |
1744 | } | |
1745 | ||
18a8e864 | 1746 | static int ugeth_82xx_filtering_clear_all_addr_in_hash(struct ucc_geth_private * |
ce973b14 | 1747 | ugeth, |
18a8e864 | 1748 | enum enet_addr_type |
ce973b14 LY |
1749 | enet_addr_type) |
1750 | { | |
6fee40e9 | 1751 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
18a8e864 LY |
1752 | struct ucc_fast_private *uccf; |
1753 | enum comm_dir comm_dir; | |
ce973b14 LY |
1754 | struct list_head *p_lh; |
1755 | u16 i, num; | |
6fee40e9 AF |
1756 | u32 __iomem *addr_h; |
1757 | u32 __iomem *addr_l; | |
ce973b14 LY |
1758 | u8 *p_counter; |
1759 | ||
1760 | uccf = ugeth->uccf; | |
1761 | ||
1762 | p_82xx_addr_filt = | |
6fee40e9 AF |
1763 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) |
1764 | ugeth->p_rx_glbl_pram->addressfiltering; | |
ce973b14 LY |
1765 | |
1766 | if (enet_addr_type == ENET_ADDR_TYPE_GROUP) { | |
1767 | addr_h = &(p_82xx_addr_filt->gaddr_h); | |
1768 | addr_l = &(p_82xx_addr_filt->gaddr_l); | |
1769 | p_lh = &ugeth->group_hash_q; | |
1770 | p_counter = &(ugeth->numGroupAddrInHash); | |
1771 | } else if (enet_addr_type == ENET_ADDR_TYPE_INDIVIDUAL) { | |
1772 | addr_h = &(p_82xx_addr_filt->iaddr_h); | |
1773 | addr_l = &(p_82xx_addr_filt->iaddr_l); | |
1774 | p_lh = &ugeth->ind_hash_q; | |
1775 | p_counter = &(ugeth->numIndAddrInHash); | |
1776 | } else | |
1777 | return -EINVAL; | |
1778 | ||
1779 | comm_dir = 0; | |
1780 | if (uccf->enabled_tx) | |
1781 | comm_dir |= COMM_DIR_TX; | |
1782 | if (uccf->enabled_rx) | |
1783 | comm_dir |= COMM_DIR_RX; | |
1784 | if (comm_dir) | |
1785 | ugeth_disable(ugeth, comm_dir); | |
1786 | ||
1787 | /* Clear the hash table. */ | |
1788 | out_be32(addr_h, 0x00000000); | |
1789 | out_be32(addr_l, 0x00000000); | |
1790 | ||
1791 | if (!p_lh) | |
1792 | return 0; | |
1793 | ||
1794 | num = *p_counter; | |
1795 | ||
1796 | /* Delete all remaining CQ elements */ | |
1797 | for (i = 0; i < num; i++) | |
1798 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY(dequeue(p_lh))); | |
1799 | ||
1800 | *p_counter = 0; | |
1801 | ||
1802 | if (comm_dir) | |
1803 | ugeth_enable(ugeth, comm_dir); | |
1804 | ||
1805 | return 0; | |
1806 | } | |
1807 | ||
18a8e864 | 1808 | static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth, |
ce973b14 LY |
1809 | u8 paddr_num) |
1810 | { | |
1811 | ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */ | |
1812 | return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */ | |
1813 | } | |
1814 | ||
18a8e864 | 1815 | static void ucc_geth_memclean(struct ucc_geth_private *ugeth) |
ce973b14 LY |
1816 | { |
1817 | u16 i, j; | |
6fee40e9 | 1818 | u8 __iomem *bd; |
ce973b14 LY |
1819 | |
1820 | if (!ugeth) | |
1821 | return; | |
1822 | ||
80a9fad8 | 1823 | if (ugeth->uccf) { |
ce973b14 | 1824 | ucc_fast_free(ugeth->uccf); |
80a9fad8 AV |
1825 | ugeth->uccf = NULL; |
1826 | } | |
ce973b14 LY |
1827 | |
1828 | if (ugeth->p_thread_data_tx) { | |
1829 | qe_muram_free(ugeth->thread_dat_tx_offset); | |
1830 | ugeth->p_thread_data_tx = NULL; | |
1831 | } | |
1832 | if (ugeth->p_thread_data_rx) { | |
1833 | qe_muram_free(ugeth->thread_dat_rx_offset); | |
1834 | ugeth->p_thread_data_rx = NULL; | |
1835 | } | |
1836 | if (ugeth->p_exf_glbl_param) { | |
1837 | qe_muram_free(ugeth->exf_glbl_param_offset); | |
1838 | ugeth->p_exf_glbl_param = NULL; | |
1839 | } | |
1840 | if (ugeth->p_rx_glbl_pram) { | |
1841 | qe_muram_free(ugeth->rx_glbl_pram_offset); | |
1842 | ugeth->p_rx_glbl_pram = NULL; | |
1843 | } | |
1844 | if (ugeth->p_tx_glbl_pram) { | |
1845 | qe_muram_free(ugeth->tx_glbl_pram_offset); | |
1846 | ugeth->p_tx_glbl_pram = NULL; | |
1847 | } | |
1848 | if (ugeth->p_send_q_mem_reg) { | |
1849 | qe_muram_free(ugeth->send_q_mem_reg_offset); | |
1850 | ugeth->p_send_q_mem_reg = NULL; | |
1851 | } | |
1852 | if (ugeth->p_scheduler) { | |
1853 | qe_muram_free(ugeth->scheduler_offset); | |
1854 | ugeth->p_scheduler = NULL; | |
1855 | } | |
1856 | if (ugeth->p_tx_fw_statistics_pram) { | |
1857 | qe_muram_free(ugeth->tx_fw_statistics_pram_offset); | |
1858 | ugeth->p_tx_fw_statistics_pram = NULL; | |
1859 | } | |
1860 | if (ugeth->p_rx_fw_statistics_pram) { | |
1861 | qe_muram_free(ugeth->rx_fw_statistics_pram_offset); | |
1862 | ugeth->p_rx_fw_statistics_pram = NULL; | |
1863 | } | |
1864 | if (ugeth->p_rx_irq_coalescing_tbl) { | |
1865 | qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset); | |
1866 | ugeth->p_rx_irq_coalescing_tbl = NULL; | |
1867 | } | |
1868 | if (ugeth->p_rx_bd_qs_tbl) { | |
1869 | qe_muram_free(ugeth->rx_bd_qs_tbl_offset); | |
1870 | ugeth->p_rx_bd_qs_tbl = NULL; | |
1871 | } | |
1872 | if (ugeth->p_init_enet_param_shadow) { | |
1873 | return_init_enet_entries(ugeth, | |
1874 | &(ugeth->p_init_enet_param_shadow-> | |
1875 | rxthread[0]), | |
1876 | ENET_INIT_PARAM_MAX_ENTRIES_RX, | |
1877 | ugeth->ug_info->riscRx, 1); | |
1878 | return_init_enet_entries(ugeth, | |
1879 | &(ugeth->p_init_enet_param_shadow-> | |
1880 | txthread[0]), | |
1881 | ENET_INIT_PARAM_MAX_ENTRIES_TX, | |
1882 | ugeth->ug_info->riscTx, 0); | |
1883 | kfree(ugeth->p_init_enet_param_shadow); | |
1884 | ugeth->p_init_enet_param_shadow = NULL; | |
1885 | } | |
1886 | for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) { | |
1887 | bd = ugeth->p_tx_bd_ring[i]; | |
3a8205ea NIP |
1888 | if (!bd) |
1889 | continue; | |
ce973b14 LY |
1890 | for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { |
1891 | if (ugeth->tx_skbuff[i][j]) { | |
7f80202b | 1892 | dma_unmap_single(&ugeth->dev->dev, |
6fee40e9 AF |
1893 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
1894 | (in_be32((u32 __iomem *)bd) & | |
ce973b14 LY |
1895 | BD_LENGTH_MASK), |
1896 | DMA_TO_DEVICE); | |
1897 | dev_kfree_skb_any(ugeth->tx_skbuff[i][j]); | |
1898 | ugeth->tx_skbuff[i][j] = NULL; | |
1899 | } | |
1900 | } | |
1901 | ||
1902 | kfree(ugeth->tx_skbuff[i]); | |
1903 | ||
1904 | if (ugeth->p_tx_bd_ring[i]) { | |
1905 | if (ugeth->ug_info->uf_info.bd_mem_part == | |
1906 | MEM_PART_SYSTEM) | |
1907 | kfree((void *)ugeth->tx_bd_ring_offset[i]); | |
1908 | else if (ugeth->ug_info->uf_info.bd_mem_part == | |
1909 | MEM_PART_MURAM) | |
1910 | qe_muram_free(ugeth->tx_bd_ring_offset[i]); | |
1911 | ugeth->p_tx_bd_ring[i] = NULL; | |
1912 | } | |
1913 | } | |
1914 | for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) { | |
1915 | if (ugeth->p_rx_bd_ring[i]) { | |
1916 | /* Return existing data buffers in ring */ | |
1917 | bd = ugeth->p_rx_bd_ring[i]; | |
1918 | for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { | |
1919 | if (ugeth->rx_skbuff[i][j]) { | |
7f80202b | 1920 | dma_unmap_single(&ugeth->dev->dev, |
6fee40e9 | 1921 | in_be32(&((struct qe_bd __iomem *)bd)->buf), |
18a8e864 LY |
1922 | ugeth->ug_info-> |
1923 | uf_info.max_rx_buf_length + | |
1924 | UCC_GETH_RX_DATA_BUF_ALIGNMENT, | |
1925 | DMA_FROM_DEVICE); | |
1926 | dev_kfree_skb_any( | |
1927 | ugeth->rx_skbuff[i][j]); | |
ce973b14 LY |
1928 | ugeth->rx_skbuff[i][j] = NULL; |
1929 | } | |
18a8e864 | 1930 | bd += sizeof(struct qe_bd); |
ce973b14 LY |
1931 | } |
1932 | ||
1933 | kfree(ugeth->rx_skbuff[i]); | |
1934 | ||
1935 | if (ugeth->ug_info->uf_info.bd_mem_part == | |
1936 | MEM_PART_SYSTEM) | |
1937 | kfree((void *)ugeth->rx_bd_ring_offset[i]); | |
1938 | else if (ugeth->ug_info->uf_info.bd_mem_part == | |
1939 | MEM_PART_MURAM) | |
1940 | qe_muram_free(ugeth->rx_bd_ring_offset[i]); | |
1941 | ugeth->p_rx_bd_ring[i] = NULL; | |
1942 | } | |
1943 | } | |
1944 | while (!list_empty(&ugeth->group_hash_q)) | |
1945 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | |
1946 | (dequeue(&ugeth->group_hash_q))); | |
1947 | while (!list_empty(&ugeth->ind_hash_q)) | |
1948 | put_enet_addr_container(ENET_ADDR_CONT_ENTRY | |
1949 | (dequeue(&ugeth->ind_hash_q))); | |
3e73fc9a AV |
1950 | if (ugeth->ug_regs) { |
1951 | iounmap(ugeth->ug_regs); | |
1952 | ugeth->ug_regs = NULL; | |
1953 | } | |
ce973b14 LY |
1954 | } |
1955 | ||
1956 | static void ucc_geth_set_multi(struct net_device *dev) | |
1957 | { | |
18a8e864 | 1958 | struct ucc_geth_private *ugeth; |
ce973b14 | 1959 | struct dev_mc_list *dmi; |
6fee40e9 AF |
1960 | struct ucc_fast __iomem *uf_regs; |
1961 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; | |
9030b3dd | 1962 | int i; |
ce973b14 LY |
1963 | |
1964 | ugeth = netdev_priv(dev); | |
1965 | ||
1966 | uf_regs = ugeth->uccf->uf_regs; | |
1967 | ||
1968 | if (dev->flags & IFF_PROMISC) { | |
3bc53427 | 1969 | setbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); |
ce973b14 | 1970 | } else { |
3bc53427 | 1971 | clrbits32(&uf_regs->upsmr, UCC_GETH_UPSMR_PRO); |
ce973b14 LY |
1972 | |
1973 | p_82xx_addr_filt = | |
6fee40e9 | 1974 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> |
ce973b14 LY |
1975 | p_rx_glbl_pram->addressfiltering; |
1976 | ||
1977 | if (dev->flags & IFF_ALLMULTI) { | |
1978 | /* Catch all multicast addresses, so set the | |
1979 | * filter to all 1's. | |
1980 | */ | |
1981 | out_be32(&p_82xx_addr_filt->gaddr_h, 0xffffffff); | |
1982 | out_be32(&p_82xx_addr_filt->gaddr_l, 0xffffffff); | |
1983 | } else { | |
1984 | /* Clear filter and add the addresses in the list. | |
1985 | */ | |
1986 | out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); | |
1987 | out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); | |
1988 | ||
1989 | dmi = dev->mc_list; | |
1990 | ||
1991 | for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) { | |
1992 | ||
1993 | /* Only support group multicast for now. | |
1994 | */ | |
1995 | if (!(dmi->dmi_addr[0] & 1)) | |
1996 | continue; | |
1997 | ||
ce973b14 LY |
1998 | /* Ask CPM to run CRC and set bit in |
1999 | * filter mask. | |
2000 | */ | |
9030b3dd | 2001 | hw_add_addr_in_hash(ugeth, dmi->dmi_addr); |
ce973b14 LY |
2002 | } |
2003 | } | |
2004 | } | |
2005 | } | |
2006 | ||
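
The `dmi->dmi_addr[0] & 1` test in ucc_geth_set_multi() above keys off the Ethernet group bit: an address is multicast (or broadcast) exactly when the least-significant bit of its first octet is set. A standalone sketch of that check (not driver code; example addresses only):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Group (multicast/broadcast) addresses have the LSB of the first octet set -
 * the same test as "dmi->dmi_addr[0] & 1" above. */
static bool is_group_address(const uint8_t addr[6])
{
	return addr[0] & 1;
}

int main(void)
{
	const uint8_t ucast[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 };
	const uint8_t mcast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; /* IPv4 multicast prefix */

	printf("%d %d\n", is_group_address(ucast), is_group_address(mcast));	/* 0 1 */
	return 0;
}
```
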
18a8e864 | 2007 | static void ucc_geth_stop(struct ucc_geth_private *ugeth) |
ce973b14 | 2008 | { |
6fee40e9 | 2009 | struct ucc_geth __iomem *ug_regs = ugeth->ug_regs; |
728de4c9 | 2010 | struct phy_device *phydev = ugeth->phydev; |
ce973b14 | 2011 | |
b39d66a8 | 2012 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 LY |
2013 | |
2014 | /* Disable the controller */ | |
2015 | ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); | |
2016 | ||
2017 | /* Tell the kernel the link is down */ | |
728de4c9 | 2018 | phy_stop(phydev); |
ce973b14 LY |
2019 | |
2020 | /* Mask all interrupts */ | |
c6f5047b | 2021 | out_be32(ugeth->uccf->p_uccm, 0x00000000); |
ce973b14 LY |
2022 | |
2023 | /* Clear all interrupts */ | |
2024 | out_be32(ugeth->uccf->p_ucce, 0xffffffff); | |
2025 | ||
2026 | /* Disable Rx and Tx */ | |
3bc53427 | 2027 | clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
ce973b14 | 2028 | |
ce973b14 LY |
2029 | ucc_geth_memclean(ugeth); |
2030 | } | |
2031 | ||
728de4c9 | 2032 | static int ucc_struct_init(struct ucc_geth_private *ugeth) |
ce973b14 | 2033 | { |
18a8e864 LY |
2034 | struct ucc_geth_info *ug_info; |
2035 | struct ucc_fast_info *uf_info; | |
728de4c9 | 2036 | int i; |
ce973b14 LY |
2037 | |
2038 | ug_info = ugeth->ug_info; | |
2039 | uf_info = &ug_info->uf_info; | |
2040 | ||
2041 | if (!((uf_info->bd_mem_part == MEM_PART_SYSTEM) || | |
2042 | (uf_info->bd_mem_part == MEM_PART_MURAM))) { | |
890de95e LY |
2043 | if (netif_msg_probe(ugeth)) |
2044 | ugeth_err("%s: Bad memory partition value.", | |
b39d66a8 | 2045 | __func__); |
ce973b14 LY |
2046 | return -EINVAL; |
2047 | } | |
2048 | ||
2049 | /* Rx BD lengths */ | |
2050 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
2051 | if ((ug_info->bdRingLenRx[i] < UCC_GETH_RX_BD_RING_SIZE_MIN) || | |
2052 | (ug_info->bdRingLenRx[i] % | |
2053 | UCC_GETH_RX_BD_RING_SIZE_ALIGNMENT)) { | |
890de95e LY |
2054 | if (netif_msg_probe(ugeth)) |
2055 | ugeth_err | |
2056 | ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", | |
b39d66a8 | 2057 | __func__); |
ce973b14 LY |
2058 | return -EINVAL; |
2059 | } | |
2060 | } | |
2061 | ||
2062 | /* Tx BD lengths */ | |
2063 | for (i = 0; i < ug_info->numQueuesTx; i++) { | |
2064 | if (ug_info->bdRingLenTx[i] < UCC_GETH_TX_BD_RING_SIZE_MIN) { | |
890de95e LY |
2065 | if (netif_msg_probe(ugeth)) |
2066 | ugeth_err | |
2067 | ("%s: Tx BD ring length must be no smaller than 2.", | |
b39d66a8 | 2068 | __func__); |
ce973b14 LY |
2069 | return -EINVAL; |
2070 | } | |
2071 | } | |
2072 | ||
2073 | /* mrblr */ | |
2074 | if ((uf_info->max_rx_buf_length == 0) || | |
2075 | (uf_info->max_rx_buf_length % UCC_GETH_MRBLR_ALIGNMENT)) { | |
890de95e LY |
2076 | if (netif_msg_probe(ugeth)) |
2077 | ugeth_err | |
2078 | ("%s: max_rx_buf_length must be non-zero multiple of 128.", | |
b39d66a8 | 2079 | __func__); |
ce973b14 LY |
2080 | return -EINVAL; |
2081 | } | |
2082 | ||
2083 | /* num Tx queues */ | |
2084 | if (ug_info->numQueuesTx > NUM_TX_QUEUES) { | |
890de95e | 2085 | if (netif_msg_probe(ugeth)) |
b39d66a8 | 2086 | ugeth_err("%s: number of tx queues too large.", __func__); |
ce973b14 LY |
2087 | return -EINVAL; |
2088 | } | |
2089 | ||
2090 | /* num Rx queues */ | |
2091 | if (ug_info->numQueuesRx > NUM_RX_QUEUES) { | |
890de95e | 2092 | if (netif_msg_probe(ugeth)) |
b39d66a8 | 2093 | ugeth_err("%s: number of rx queues too large.", __func__); |
ce973b14 LY |
2094 | return -EINVAL; |
2095 | } | |
2096 | ||
2097 | /* l2qt */ | |
2098 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) { | |
2099 | if (ug_info->l2qt[i] >= ug_info->numQueuesRx) { | |
890de95e LY |
2100 | if (netif_msg_probe(ugeth)) |
2101 | ugeth_err | |
2102 | ("%s: VLAN priority table entry must not be" | |
2103 | " larger than number of Rx queues.", | |
b39d66a8 | 2104 | __func__); |
ce973b14 LY |
2105 | return -EINVAL; |
2106 | } | |
2107 | } | |
2108 | ||
2109 | /* l3qt */ | |
2110 | for (i = 0; i < UCC_GETH_IP_PRIORITY_MAX; i++) { | |
2111 | if (ug_info->l3qt[i] >= ug_info->numQueuesRx) { | |
890de95e LY |
2112 | if (netif_msg_probe(ugeth)) |
2113 | ugeth_err | |
2114 | ("%s: IP priority table entry must not be" | |
2115 | " larger than number of Rx queues.", | |
b39d66a8 | 2116 | __func__); |
ce973b14 LY |
2117 | return -EINVAL; |
2118 | } | |
2119 | } | |
2120 | ||
2121 | if (ug_info->cam && !ug_info->ecamptr) { | |
890de95e LY |
2122 | if (netif_msg_probe(ugeth)) |
2123 | ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", | |
b39d66a8 | 2124 | __func__); |
ce973b14 LY |
2125 | return -EINVAL; |
2126 | } | |
2127 | ||
2128 | if ((ug_info->numStationAddresses != | |
2129 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1) | |
2130 | && ug_info->rxExtendedFiltering) { | |
890de95e LY |
2131 | if (netif_msg_probe(ugeth)) |
2132 | ugeth_err("%s: Number of station addresses greater than 1 " | |
2133 | "not allowed in extended parsing mode.", | |
b39d66a8 | 2134 | __func__); |
ce973b14 LY |
2135 | return -EINVAL; |
2136 | } | |
2137 | ||
2138 | /* Generate uccm_mask for receive */ | |
2139 | uf_info->uccm_mask = ug_info->eventRegMask & UCCE_OTHER;/* Errors */ | |
2140 | for (i = 0; i < ug_info->numQueuesRx; i++) | |
3bc53427 | 2141 | uf_info->uccm_mask |= (UCC_GETH_UCCE_RXF0 << i); |
ce973b14 LY |
2142 | |
2143 | for (i = 0; i < ug_info->numQueuesTx; i++) | |
3bc53427 | 2144 | uf_info->uccm_mask |= (UCC_GETH_UCCE_TXB0 << i); |
ce973b14 | 2145 | /* Initialize the general fast UCC block. */ |
728de4c9 | 2146 | if (ucc_fast_init(uf_info, &ugeth->uccf)) { |
890de95e | 2147 | if (netif_msg_probe(ugeth)) |
b39d66a8 | 2148 | ugeth_err("%s: Failed to init uccf.", __func__); |
ce973b14 LY |
2149 | return -ENOMEM; |
2150 | } | |
728de4c9 | 2151 | |
3e73fc9a AV |
2152 | ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs)); |
2153 | if (!ugeth->ug_regs) { | |
2154 | if (netif_msg_probe(ugeth)) | |
2155 | ugeth_err("%s: Failed to ioremap regs.", __func__); | |
2156 | return -ENOMEM; | |
2157 | } | |
728de4c9 KP |
2158 | |
2159 | return 0; | |
2160 | } | |
2161 | ||
2162 | static int ucc_geth_startup(struct ucc_geth_private *ugeth) | |
2163 | { | |
6fee40e9 AF |
2164 | struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; |
2165 | struct ucc_geth_init_pram __iomem *p_init_enet_pram; | |
728de4c9 KP |
2166 | struct ucc_fast_private *uccf; |
2167 | struct ucc_geth_info *ug_info; | |
2168 | struct ucc_fast_info *uf_info; | |
6fee40e9 AF |
2169 | struct ucc_fast __iomem *uf_regs; |
2170 | struct ucc_geth __iomem *ug_regs; | |
728de4c9 KP |
2171 | int ret_val = -EINVAL; |
2172 | u32 remoder = UCC_GETH_REMODER_INIT; | |
3bc53427 | 2173 | u32 init_enet_pram_offset, cecr_subblock, command; |
728de4c9 KP |
2174 | u32 ifstat, i, j, size, l2qt, l3qt, length; |
2175 | u16 temoder = UCC_GETH_TEMODER_INIT; | |
2176 | u16 test; | |
2177 | u8 function_code = 0; | |
6fee40e9 AF |
2178 | u8 __iomem *bd; |
2179 | u8 __iomem *endOfRing; | |
728de4c9 KP |
2180 | u8 numThreadsRxNumerical, numThreadsTxNumerical; |
2181 | ||
b39d66a8 | 2182 | ugeth_vdbg("%s: IN", __func__); |
728de4c9 KP |
2183 | uccf = ugeth->uccf; |
2184 | ug_info = ugeth->ug_info; | |
2185 | uf_info = &ug_info->uf_info; | |
2186 | uf_regs = uccf->uf_regs; | |
2187 | ug_regs = ugeth->ug_regs; | |
ce973b14 LY |
2188 | |
2189 | switch (ug_info->numThreadsRx) { | |
2190 | case UCC_GETH_NUM_OF_THREADS_1: | |
2191 | numThreadsRxNumerical = 1; | |
2192 | break; | |
2193 | case UCC_GETH_NUM_OF_THREADS_2: | |
2194 | numThreadsRxNumerical = 2; | |
2195 | break; | |
2196 | case UCC_GETH_NUM_OF_THREADS_4: | |
2197 | numThreadsRxNumerical = 4; | |
2198 | break; | |
2199 | case UCC_GETH_NUM_OF_THREADS_6: | |
2200 | numThreadsRxNumerical = 6; | |
2201 | break; | |
2202 | case UCC_GETH_NUM_OF_THREADS_8: | |
2203 | numThreadsRxNumerical = 8; | |
2204 | break; | |
2205 | default: | |
890de95e LY |
2206 | if (netif_msg_ifup(ugeth)) |
2207 | ugeth_err("%s: Bad number of Rx threads value.", | |
b39d66a8 | 2208 | __func__); |
ce973b14 LY |
2209 | return -EINVAL; |
2210 | break; | |
2211 | } | |
2212 | ||
2213 | switch (ug_info->numThreadsTx) { | |
2214 | case UCC_GETH_NUM_OF_THREADS_1: | |
2215 | numThreadsTxNumerical = 1; | |
2216 | break; | |
2217 | case UCC_GETH_NUM_OF_THREADS_2: | |
2218 | numThreadsTxNumerical = 2; | |
2219 | break; | |
2220 | case UCC_GETH_NUM_OF_THREADS_4: | |
2221 | numThreadsTxNumerical = 4; | |
2222 | break; | |
2223 | case UCC_GETH_NUM_OF_THREADS_6: | |
2224 | numThreadsTxNumerical = 6; | |
2225 | break; | |
2226 | case UCC_GETH_NUM_OF_THREADS_8: | |
2227 | numThreadsTxNumerical = 8; | |
2228 | break; | |
2229 | default: | |
890de95e LY |
2230 | if (netif_msg_ifup(ugeth)) |
2231 | ugeth_err("%s: Bad number of Tx threads value.", | |
b39d66a8 | 2232 | __func__); |
ce973b14 LY |
2233 | return -EINVAL; |
2234 | break; | |
2235 | } | |
2236 | ||
2237 | /* Calculate rx_extended_features */ | |
2238 | ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck || | |
2239 | ug_info->ipAddressAlignment || | |
2240 | (ug_info->numStationAddresses != | |
2241 | UCC_GETH_NUM_OF_STATION_ADDRESSES_1); | |
2242 | ||
2243 | ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features || | |
2244 | (ug_info->vlanOperationTagged != UCC_GETH_VLAN_OPERATION_TAGGED_NOP) | |
2245 | || (ug_info->vlanOperationNonTagged != | |
2246 | UCC_GETH_VLAN_OPERATION_NON_TAGGED_NOP); | |
2247 | ||
ce973b14 LY |
2248 | init_default_reg_vals(&uf_regs->upsmr, |
2249 | &ug_regs->maccfg1, &ug_regs->maccfg2); | |
2250 | ||
2251 | /* Set UPSMR */ | |
2252 | /* For more details see the hardware spec. */ | |
2253 | init_rx_parameters(ug_info->bro, | |
2254 | ug_info->rsh, ug_info->pro, &uf_regs->upsmr); | |
2255 | ||
2256 | /* We're going to ignore other registers for now, */ | |
2257 | /* except as needed to get up and running */ | |
2258 | ||
2259 | /* Set MACCFG1 */ | |
2260 | /* For more details see the hardware spec. */ | |
2261 | init_flow_control_params(ug_info->aufc, | |
2262 | ug_info->receiveFlowControl, | |
ac421852 | 2263 | ug_info->transmitFlowControl, |
ce973b14 LY |
2264 | ug_info->pausePeriod, |
2265 | ug_info->extensionField, | |
2266 | &uf_regs->upsmr, | |
2267 | &ug_regs->uempr, &ug_regs->maccfg1); | |
2268 | ||
3bc53427 | 2269 | setbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX); |
ce973b14 LY |
2270 | |
2271 | /* Set IPGIFG */ | |
2272 | /* For more details see the hardware spec. */ | |
2273 | ret_val = init_inter_frame_gap_params(ug_info->nonBackToBackIfgPart1, | |
2274 | ug_info->nonBackToBackIfgPart2, | |
2275 | ug_info-> | |
2276 | miminumInterFrameGapEnforcement, | |
2277 | ug_info->backToBackInterFrameGap, | |
2278 | &ug_regs->ipgifg); | |
2279 | if (ret_val != 0) { | |
890de95e LY |
2280 | if (netif_msg_ifup(ugeth)) |
2281 | ugeth_err("%s: IPGIFG initialization parameter too large.", | |
b39d66a8 | 2282 | __func__); |
ce973b14 LY |
2283 | return ret_val; |
2284 | } | |
2285 | ||
2286 | /* Set HAFDUP */ | |
2287 | /* For more details see the hardware spec. */ | |
2288 | ret_val = init_half_duplex_params(ug_info->altBeb, | |
2289 | ug_info->backPressureNoBackoff, | |
2290 | ug_info->noBackoff, | |
2291 | ug_info->excessDefer, | |
2292 | ug_info->altBebTruncation, | |
2293 | ug_info->maxRetransmission, | |
2294 | ug_info->collisionWindow, | |
2295 | &ug_regs->hafdup); | |
2296 | if (ret_val != 0) { | |
890de95e LY |
2297 | if (netif_msg_ifup(ugeth)) |
2298 | ugeth_err("%s: Half Duplex initialization parameter too large.", | |
b39d66a8 | 2299 | __func__); |
ce973b14 LY |
2300 | return ret_val; |
2301 | } | |
2302 | ||
2303 | /* Set IFSTAT */ | |
2304 | /* For more details see the hardware spec. */ | |
2305 | /* Read only - resets upon read */ | |
2306 | ifstat = in_be32(&ug_regs->ifstat); | |
2307 | ||
2308 | /* Clear UEMPR */ | |
2309 | /* For more details see the hardware spec. */ | |
2310 | out_be32(&ug_regs->uempr, 0); | |
2311 | ||
2312 | /* Set UESCR */ | |
2313 | /* For more details see the hardware spec. */ | |
2314 | init_hw_statistics_gathering_mode((ug_info->statisticsMode & | |
2315 | UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE), | |
2316 | 0, &uf_regs->upsmr, &ug_regs->uescr); | |
2317 | ||
2318 | /* Allocate Tx bds */ | |
2319 | for (j = 0; j < ug_info->numQueuesTx; j++) { | |
2320 | /* Allocate in multiple of | |
2321 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT, | |
2322 | according to spec */ | |
18a8e864 | 2323 | length = ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) |
ce973b14 LY |
2324 | / UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) |
2325 | * UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | |
18a8e864 | 2326 | if ((ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)) % |
ce973b14 LY |
2327 | UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT) |
2328 | length += UCC_GETH_TX_BD_RING_SIZE_MEMORY_ALIGNMENT; | |
2329 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { | |
2330 | u32 align = 4; | |
2331 | if (UCC_GETH_TX_BD_RING_ALIGNMENT > 4) | |
2332 | align = UCC_GETH_TX_BD_RING_ALIGNMENT; | |
2333 | ugeth->tx_bd_ring_offset[j] = | |
6fee40e9 | 2334 | (u32) kmalloc((u32) (length + align), GFP_KERNEL); |
04b588d7 | 2335 | |
ce973b14 LY |
2336 | if (ugeth->tx_bd_ring_offset[j] != 0) |
2337 | ugeth->p_tx_bd_ring[j] = | |
6fee40e9 | 2338 | (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] + |
ce973b14 LY |
2339 | align) & ~(align - 1)); |
2340 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | |
2341 | ugeth->tx_bd_ring_offset[j] = | |
2342 | qe_muram_alloc(length, | |
2343 | UCC_GETH_TX_BD_RING_ALIGNMENT); | |
4c35630c | 2344 | if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j])) |
ce973b14 | 2345 | ugeth->p_tx_bd_ring[j] = |
6fee40e9 | 2346 | (u8 __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2347 | tx_bd_ring_offset[j]); |
2348 | } | |
2349 | if (!ugeth->p_tx_bd_ring[j]) { | |
890de95e LY |
2350 | if (netif_msg_ifup(ugeth)) |
2351 | ugeth_err | |
2352 | ("%s: Can not allocate memory for Tx bd rings.", | |
b39d66a8 | 2353 | __func__); |
ce973b14 LY |
2354 | return -ENOMEM; |
2355 | } | |
2356 | /* Zero unused end of bd ring, according to spec */ | |
6fee40e9 AF |
2357 | memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] + |
2358 | ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)), 0, | |
18a8e864 | 2359 | length - ug_info->bdRingLenTx[j] * sizeof(struct qe_bd)); |
ce973b14 LY |
2360 | } |
2361 | ||
2362 | /* Allocate Rx bds */ | |
2363 | for (j = 0; j < ug_info->numQueuesRx; j++) { | |
18a8e864 | 2364 | length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd); |
ce973b14 LY |
2365 | if (uf_info->bd_mem_part == MEM_PART_SYSTEM) { |
2366 | u32 align = 4; | |
2367 | if (UCC_GETH_RX_BD_RING_ALIGNMENT > 4) | |
2368 | align = UCC_GETH_RX_BD_RING_ALIGNMENT; | |
2369 | ugeth->rx_bd_ring_offset[j] = | |
6fee40e9 | 2370 | (u32) kmalloc((u32) (length + align), GFP_KERNEL); |
ce973b14 LY |
2371 | if (ugeth->rx_bd_ring_offset[j] != 0) |
2372 | ugeth->p_rx_bd_ring[j] = | |
6fee40e9 | 2373 | (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] + |
ce973b14 LY |
2374 | align) & ~(align - 1)); |
2375 | } else if (uf_info->bd_mem_part == MEM_PART_MURAM) { | |
2376 | ugeth->rx_bd_ring_offset[j] = | |
2377 | qe_muram_alloc(length, | |
2378 | UCC_GETH_RX_BD_RING_ALIGNMENT); | |
4c35630c | 2379 | if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j])) |
ce973b14 | 2380 | ugeth->p_rx_bd_ring[j] = |
6fee40e9 | 2381 | (u8 __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2382 | rx_bd_ring_offset[j]); |
2383 | } | |
2384 | if (!ugeth->p_rx_bd_ring[j]) { | |
890de95e LY |
2385 | if (netif_msg_ifup(ugeth)) |
2386 | ugeth_err | |
2387 | ("%s: Can not allocate memory for Rx bd rings.", | |
b39d66a8 | 2388 | __func__); |
ce973b14 LY |
2389 | return -ENOMEM; |
2390 | } | |
2391 | } | |
2392 | ||
2393 | /* Init Tx bds */ | |
2394 | for (j = 0; j < ug_info->numQueuesTx; j++) { | |
2395 | /* Setup the skbuff rings */ | |
04b588d7 AD |
2396 | ugeth->tx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * |
2397 | ugeth->ug_info->bdRingLenTx[j], | |
2398 | GFP_KERNEL); | |
ce973b14 LY |
2399 | |
2400 | if (ugeth->tx_skbuff[j] == NULL) { | |
890de95e LY |
2401 | if (netif_msg_ifup(ugeth)) |
2402 | ugeth_err("%s: Could not allocate tx_skbuff", | |
b39d66a8 | 2403 | __func__); |
ce973b14 LY |
2404 | return -ENOMEM; |
2405 | } | |
2406 | ||
2407 | for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++) | |
2408 | ugeth->tx_skbuff[j][i] = NULL; | |
2409 | ||
2410 | ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0; | |
2411 | bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j]; | |
2412 | for (i = 0; i < ug_info->bdRingLenTx[j]; i++) { | |
18a8e864 | 2413 | /* clear bd buffer */ |
6fee40e9 | 2414 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); |
18a8e864 | 2415 | /* set bd status and length */ |
6fee40e9 | 2416 | out_be32((u32 __iomem *)bd, 0); |
18a8e864 | 2417 | bd += sizeof(struct qe_bd); |
ce973b14 | 2418 | } |
18a8e864 LY |
2419 | bd -= sizeof(struct qe_bd); |
2420 | /* set bd status and length */ | |
6fee40e9 | 2421 | out_be32((u32 __iomem *)bd, T_W); /* for last BD set Wrap bit */ |
ce973b14 LY |
2422 | } |
2423 | ||
2424 | /* Init Rx bds */ | |
2425 | for (j = 0; j < ug_info->numQueuesRx; j++) { | |
2426 | /* Setup the skbuff rings */ | |
04b588d7 AD |
2427 | ugeth->rx_skbuff[j] = kmalloc(sizeof(struct sk_buff *) * |
2428 | ugeth->ug_info->bdRingLenRx[j], | |
2429 | GFP_KERNEL); | |
ce973b14 LY |
2430 | |
2431 | if (ugeth->rx_skbuff[j] == NULL) { | |
890de95e LY |
2432 | if (netif_msg_ifup(ugeth)) |
2433 | ugeth_err("%s: Could not allocate rx_skbuff", | |
b39d66a8 | 2434 | __func__); |
ce973b14 LY |
2435 | return -ENOMEM; |
2436 | } | |
2437 | ||
2438 | for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++) | |
2439 | ugeth->rx_skbuff[j][i] = NULL; | |
2440 | ||
2441 | ugeth->skb_currx[j] = 0; | |
2442 | bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j]; | |
2443 | for (i = 0; i < ug_info->bdRingLenRx[j]; i++) { | |
18a8e864 | 2444 | /* set bd status and length */ |
6fee40e9 | 2445 | out_be32((u32 __iomem *)bd, R_I); |
18a8e864 | 2446 | /* clear bd buffer */ |
6fee40e9 | 2447 | out_be32(&((struct qe_bd __iomem *)bd)->buf, 0); |
18a8e864 | 2448 | bd += sizeof(struct qe_bd); |
ce973b14 | 2449 | } |
18a8e864 LY |
2450 | bd -= sizeof(struct qe_bd); |
2451 | /* set bd status and length */ | |
6fee40e9 | 2452 | out_be32((u32 __iomem *)bd, R_W); /* for last BD set Wrap bit */ |
ce973b14 LY |
2453 | } |
2454 | ||
2455 | /* | |
2456 | * Global PRAM | |
2457 | */ | |
2458 | /* Tx global PRAM */ | |
2459 | /* Allocate global tx parameter RAM page */ | |
2460 | ugeth->tx_glbl_pram_offset = | |
18a8e864 | 2461 | qe_muram_alloc(sizeof(struct ucc_geth_tx_global_pram), |
ce973b14 | 2462 | UCC_GETH_TX_GLOBAL_PRAM_ALIGNMENT); |
4c35630c | 2463 | if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) { |
890de95e LY |
2464 | if (netif_msg_ifup(ugeth)) |
2465 | ugeth_err | |
2466 | ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", | |
b39d66a8 | 2467 | __func__); |
ce973b14 LY |
2468 | return -ENOMEM; |
2469 | } | |
2470 | ugeth->p_tx_glbl_pram = | |
6fee40e9 | 2471 | (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2472 | tx_glbl_pram_offset); |
2473 | /* Zero out p_tx_glbl_pram */ | |
6fee40e9 | 2474 | memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram)); |
ce973b14 LY |
2475 | |
2476 | /* Fill global PRAM */ | |
2477 | ||
2478 | /* TQPTR */ | |
2479 | /* Size varies with number of Tx threads */ | |
2480 | ugeth->thread_dat_tx_offset = | |
2481 | qe_muram_alloc(numThreadsTxNumerical * | |
18a8e864 | 2482 | sizeof(struct ucc_geth_thread_data_tx) + |
ce973b14 LY |
2483 | 32 * (numThreadsTxNumerical == 1), |
2484 | UCC_GETH_THREAD_DATA_ALIGNMENT); | |
4c35630c | 2485 | if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) { |
890de95e LY |
2486 | if (netif_msg_ifup(ugeth)) |
2487 | ugeth_err | |
2488 | ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", | |
b39d66a8 | 2489 | __func__); |
ce973b14 LY |
2490 | return -ENOMEM; |
2491 | } | |
2492 | ||
2493 | ugeth->p_thread_data_tx = | |
6fee40e9 | 2494 | (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2495 | thread_dat_tx_offset); |
2496 | out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset); | |
2497 | ||
2498 | /* vtagtable */ | |
2499 | for (i = 0; i < UCC_GETH_TX_VTAG_TABLE_ENTRY_MAX; i++) | |
2500 | out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i], | |
2501 | ug_info->vtagtable[i]); | |
2502 | ||
2503 | /* iphoffset */ | |
2504 | for (i = 0; i < TX_IP_OFFSET_ENTRY_MAX; i++) | |
6fee40e9 AF |
2505 | out_8(&ugeth->p_tx_glbl_pram->iphoffset[i], |
2506 | ug_info->iphoffset[i]); | |
ce973b14 LY |
2507 | |
2508 | /* SQPTR */ | |
2509 | /* Size varies with number of Tx queues */ | |
2510 | ugeth->send_q_mem_reg_offset = | |
2511 | qe_muram_alloc(ug_info->numQueuesTx * | |
18a8e864 | 2512 | sizeof(struct ucc_geth_send_queue_qd), |
ce973b14 | 2513 | UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT); |
4c35630c | 2514 | if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) { |
890de95e LY |
2515 | if (netif_msg_ifup(ugeth)) |
2516 | ugeth_err | |
2517 | ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", | |
b39d66a8 | 2518 | __func__); |
ce973b14 LY |
2519 | return -ENOMEM; |
2520 | } | |
2521 | ||
2522 | ugeth->p_send_q_mem_reg = | |
6fee40e9 | 2523 | (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2524 | send_q_mem_reg_offset); |
2525 | out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset); | |
2526 | ||
2527 | /* Setup the table */ | |
2528 | /* Assume BD rings are already established */ | |
2529 | for (i = 0; i < ug_info->numQueuesTx; i++) { | |
2530 | endOfRing = | |
2531 | ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] - | |
18a8e864 | 2532 | 1) * sizeof(struct qe_bd); |
ce973b14 LY |
2533 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { |
2534 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | |
2535 | (u32) virt_to_phys(ugeth->p_tx_bd_ring[i])); | |
2536 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | |
2537 | last_bd_completed_address, | |
2538 | (u32) virt_to_phys(endOfRing)); | |
2539 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | |
2540 | MEM_PART_MURAM) { | |
2541 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base, | |
2542 | (u32) immrbar_virt_to_phys(ugeth-> | |
2543 | p_tx_bd_ring[i])); | |
2544 | out_be32(&ugeth->p_send_q_mem_reg->sqqd[i]. | |
2545 | last_bd_completed_address, | |
2546 | (u32) immrbar_virt_to_phys(endOfRing)); | |
2547 | } | |
2548 | } | |
2549 | ||
2550 | /* schedulerbasepointer */ | |
2551 | ||
2552 | if (ug_info->numQueuesTx > 1) { | |
2553 | /* scheduler exists only if more than 1 tx queue */ | |
2554 | ugeth->scheduler_offset = | |
18a8e864 | 2555 | qe_muram_alloc(sizeof(struct ucc_geth_scheduler), |
ce973b14 | 2556 | UCC_GETH_SCHEDULER_ALIGNMENT); |
4c35630c | 2557 | if (IS_ERR_VALUE(ugeth->scheduler_offset)) { |
890de95e LY |
2558 | if (netif_msg_ifup(ugeth)) |
2559 | ugeth_err | |
2560 | ("%s: Can not allocate DPRAM memory for p_scheduler.", | |
b39d66a8 | 2561 | __func__); |
ce973b14 LY |
2562 | return -ENOMEM; |
2563 | } | |
2564 | ||
2565 | ugeth->p_scheduler = | |
6fee40e9 | 2566 | (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2567 | scheduler_offset); |
2568 | out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer, | |
2569 | ugeth->scheduler_offset); | |
2570 | /* Zero out p_scheduler */ | |
6fee40e9 | 2571 | memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler)); |
ce973b14 LY |
2572 | |
2573 | /* Set values in scheduler */ | |
2574 | out_be32(&ugeth->p_scheduler->mblinterval, | |
2575 | ug_info->mblinterval); | |
2576 | out_be16(&ugeth->p_scheduler->nortsrbytetime, | |
2577 | ug_info->nortsrbytetime); | |
6fee40e9 AF |
2578 | out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz); |
2579 | out_8(&ugeth->p_scheduler->strictpriorityq, | |
2580 | ug_info->strictpriorityq); | |
2581 | out_8(&ugeth->p_scheduler->txasap, ug_info->txasap); | |
2582 | out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw); | |
ce973b14 | 2583 | for (i = 0; i < NUM_TX_QUEUES; i++) |
6fee40e9 AF |
2584 | out_8(&ugeth->p_scheduler->weightfactor[i], |
2585 | ug_info->weightfactor[i]); | |
ce973b14 LY |
2586 | |
2587 | /* Set pointers to cpucount registers in scheduler */ | |
2588 | ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0); | |
2589 | ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1); | |
2590 | ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2); | |
2591 | ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3); | |
2592 | ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4); | |
2593 | ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5); | |
2594 | ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6); | |
2595 | ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7); | |
2596 | } | |
2597 | ||
2598 | /* schedulerbasepointer */ | |
2599 | /* TxRMON_PTR (statistics) */ | |
2600 | if (ug_info-> | |
2601 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) { | |
2602 | ugeth->tx_fw_statistics_pram_offset = | |
2603 | qe_muram_alloc(sizeof | |
18a8e864 | 2604 | (struct ucc_geth_tx_firmware_statistics_pram), |
ce973b14 | 2605 | UCC_GETH_TX_STATISTICS_ALIGNMENT); |
4c35630c | 2606 | if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) { |
890de95e LY |
2607 | if (netif_msg_ifup(ugeth)) |
2608 | ugeth_err | |
2609 | ("%s: Can not allocate DPRAM memory for" | |
2610 | " p_tx_fw_statistics_pram.", | |
b39d66a8 | 2611 | __func__); |
ce973b14 LY |
2612 | return -ENOMEM; |
2613 | } | |
2614 | ugeth->p_tx_fw_statistics_pram = | |
6fee40e9 | 2615 | (struct ucc_geth_tx_firmware_statistics_pram __iomem *) |
ce973b14 LY |
2616 | qe_muram_addr(ugeth->tx_fw_statistics_pram_offset); |
2617 | /* Zero out p_tx_fw_statistics_pram */ | |
6fee40e9 | 2618 | memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram, |
18a8e864 | 2619 | 0, sizeof(struct ucc_geth_tx_firmware_statistics_pram)); |
ce973b14 LY |
2620 | } |
2621 | ||
2622 | /* temoder */ | |
2623 | /* Already has speed set */ | |
2624 | ||
2625 | if (ug_info->numQueuesTx > 1) | |
2626 | temoder |= TEMODER_SCHEDULER_ENABLE; | |
2627 | if (ug_info->ipCheckSumGenerate) | |
2628 | temoder |= TEMODER_IP_CHECKSUM_GENERATE; | |
2629 | temoder |= ((ug_info->numQueuesTx - 1) << TEMODER_NUM_OF_QUEUES_SHIFT); | |
2630 | out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder); | |
2631 | ||
2632 | test = in_be16(&ugeth->p_tx_glbl_pram->temoder); | |
2633 | ||
2634 | /* Function code register value to be used later */ | |
6b0b594b | 2635 | function_code = UCC_BMR_BO_BE | UCC_BMR_GBL; |
ce973b14 LY |
2636 | /* Required for QE */ |
2637 | ||
2638 | /* function code register */ | |
2639 | out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24); | |
2640 | ||
2641 | /* Rx global PRAM */ | |
2642 | /* Allocate global rx parameter RAM page */ | |
2643 | ugeth->rx_glbl_pram_offset = | |
18a8e864 | 2644 | qe_muram_alloc(sizeof(struct ucc_geth_rx_global_pram), |
ce973b14 | 2645 | UCC_GETH_RX_GLOBAL_PRAM_ALIGNMENT); |
4c35630c | 2646 | if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) { |
890de95e LY |
2647 | if (netif_msg_ifup(ugeth)) |
2648 | ugeth_err | |
2649 | ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", | |
b39d66a8 | 2650 | __func__); |
ce973b14 LY |
2651 | return -ENOMEM; |
2652 | } | |
2653 | ugeth->p_rx_glbl_pram = | |
6fee40e9 | 2654 | (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2655 | rx_glbl_pram_offset); |
2656 | /* Zero out p_rx_glbl_pram */ | |
6fee40e9 | 2657 | memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram)); |
ce973b14 LY |
2658 | |
2659 | /* Fill global PRAM */ | |
2660 | ||
2661 | /* RQPTR */ | |
2662 | /* Size varies with number of Rx threads */ | |
2663 | ugeth->thread_dat_rx_offset = | |
2664 | qe_muram_alloc(numThreadsRxNumerical * | |
18a8e864 | 2665 | sizeof(struct ucc_geth_thread_data_rx), |
ce973b14 | 2666 | UCC_GETH_THREAD_DATA_ALIGNMENT); |
4c35630c | 2667 | if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) { |
890de95e LY |
2668 | if (netif_msg_ifup(ugeth)) |
2669 | ugeth_err | |
2670 | ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", | |
b39d66a8 | 2671 | __func__); |
ce973b14 LY |
2672 | return -ENOMEM; |
2673 | } | |
2674 | ||
2675 | ugeth->p_thread_data_rx = | |
6fee40e9 | 2676 | (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2677 | thread_dat_rx_offset); |
2678 | out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset); | |
2679 | ||
2680 | /* typeorlen */ | |
2681 | out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen); | |
2682 | ||
2683 | /* rxrmonbaseptr (statistics) */ | |
2684 | if (ug_info-> | |
2685 | statisticsMode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) { | |
2686 | ugeth->rx_fw_statistics_pram_offset = | |
2687 | qe_muram_alloc(sizeof | |
18a8e864 | 2688 | (struct ucc_geth_rx_firmware_statistics_pram), |
ce973b14 | 2689 | UCC_GETH_RX_STATISTICS_ALIGNMENT); |
4c35630c | 2690 | if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) { |
890de95e LY |
2691 | if (netif_msg_ifup(ugeth)) |
2692 | ugeth_err | |
2693 | ("%s: Can not allocate DPRAM memory for" | |
b39d66a8 | 2694 | " p_rx_fw_statistics_pram.", __func__); |
ce973b14 LY |
2695 | return -ENOMEM; |
2696 | } | |
2697 | ugeth->p_rx_fw_statistics_pram = | |
6fee40e9 | 2698 | (struct ucc_geth_rx_firmware_statistics_pram __iomem *) |
ce973b14 LY |
2699 | qe_muram_addr(ugeth->rx_fw_statistics_pram_offset); |
2700 | /* Zero out p_rx_fw_statistics_pram */ | |
6fee40e9 | 2701 | memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0, |
18a8e864 | 2702 | sizeof(struct ucc_geth_rx_firmware_statistics_pram)); |
ce973b14 LY |
2703 | } |
2704 | ||
2705 | /* intCoalescingPtr */ | |
2706 | ||
2707 | /* Size varies with number of Rx queues */ | |
2708 | ugeth->rx_irq_coalescing_tbl_offset = | |
2709 | qe_muram_alloc(ug_info->numQueuesRx * | |
7563907e MB |
2710 | sizeof(struct ucc_geth_rx_interrupt_coalescing_entry) |
2711 | + 4, UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT); | |
4c35630c | 2712 | if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) { |
890de95e LY |
2713 | if (netif_msg_ifup(ugeth)) |
2714 | ugeth_err | |
2715 | ("%s: Can not allocate DPRAM memory for" | |
b39d66a8 | 2716 | " p_rx_irq_coalescing_tbl.", __func__); |
ce973b14 LY |
2717 | return -ENOMEM; |
2718 | } | |
2719 | ||
2720 | ugeth->p_rx_irq_coalescing_tbl = | |
6fee40e9 | 2721 | (struct ucc_geth_rx_interrupt_coalescing_table __iomem *) |
ce973b14 LY |
2722 | qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset); |
2723 | out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr, | |
2724 | ugeth->rx_irq_coalescing_tbl_offset); | |
2725 | ||
2726 | /* Fill interrupt coalescing table */ | |
2727 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
2728 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | |
2729 | interruptcoalescingmaxvalue, | |
2730 | ug_info->interruptcoalescingmaxvalue[i]); | |
2731 | out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i]. | |
2732 | interruptcoalescingcounter, | |
2733 | ug_info->interruptcoalescingmaxvalue[i]); | |
2734 | } | |
2735 | ||
2736 | /* MRBLR */ | |
2737 | init_max_rx_buff_len(uf_info->max_rx_buf_length, | |
2738 | &ugeth->p_rx_glbl_pram->mrblr); | |
2739 | /* MFLR */ | |
2740 | out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength); | |
2741 | /* MINFLR */ | |
2742 | init_min_frame_len(ug_info->minFrameLength, | |
2743 | &ugeth->p_rx_glbl_pram->minflr, | |
2744 | &ugeth->p_rx_glbl_pram->mrblr); | |
2745 | /* MAXD1 */ | |
2746 | out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length); | |
2747 | /* MAXD2 */ | |
2748 | out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length); | |
2749 | ||
2750 | /* l2qt */ | |
2751 | l2qt = 0; | |
2752 | for (i = 0; i < UCC_GETH_VLAN_PRIORITY_MAX; i++) | |
2753 | l2qt |= (ug_info->l2qt[i] << (28 - 4 * i)); | |
2754 | out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt); | |
2755 | ||
2756 | /* l3qt */ | |
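/* Same nibble packing for the IP-priority-to-queue mappings, eight entries per l3qt word. */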
2757 | for (j = 0; j < UCC_GETH_IP_PRIORITY_MAX; j += 8) { | |
2758 | l3qt = 0; | |
2759 | for (i = 0; i < 8; i++) | |
2760 | l3qt |= (ug_info->l3qt[j + i] << (28 - 4 * i)); | |
18a8e864 | 2761 | out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt); |
ce973b14 LY |
2762 | } |
2763 | ||
2764 | /* vlantype */ | |
2765 | out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype); | |
2766 | ||
2767 | /* vlantci */ | |
2768 | out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci); | |
2769 | ||
2770 | /* ecamptr */ | |
2771 | out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr); | |
2772 | ||
2773 | /* RBDQPTR */ | |
2774 | /* Size varies with number of Rx queues */ | |
2775 | ugeth->rx_bd_qs_tbl_offset = | |
2776 | qe_muram_alloc(ug_info->numQueuesRx * | |
18a8e864 LY |
2777 | (sizeof(struct ucc_geth_rx_bd_queues_entry) + |
2778 | sizeof(struct ucc_geth_rx_prefetched_bds)), | |
ce973b14 | 2779 | UCC_GETH_RX_BD_QUEUES_ALIGNMENT); |
4c35630c | 2780 | if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) { |
890de95e LY |
2781 | if (netif_msg_ifup(ugeth)) |
2782 | ugeth_err | |
2783 | ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", | |
b39d66a8 | 2784 | __func__); |
ce973b14 LY |
2785 | return -ENOMEM; |
2786 | } | |
2787 | ||
2788 | ugeth->p_rx_bd_qs_tbl = | |
6fee40e9 | 2789 | (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2790 | rx_bd_qs_tbl_offset); |
2791 | out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset); | |
2792 | /* Zero out p_rx_bd_qs_tbl */ | |
6fee40e9 | 2793 | memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl, |
ce973b14 | 2794 | 0, |
18a8e864 LY |
2795 | ug_info->numQueuesRx * (sizeof(struct ucc_geth_rx_bd_queues_entry) + |
2796 | sizeof(struct ucc_geth_rx_prefetched_bds))); | |
ce973b14 LY |
2797 | |
2798 | /* Setup the table */ | |
2799 | /* Assume BD rings are already established */ | |
2800 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
2801 | if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) { | |
2802 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | |
2803 | (u32) virt_to_phys(ugeth->p_rx_bd_ring[i])); | |
2804 | } else if (ugeth->ug_info->uf_info.bd_mem_part == | |
2805 | MEM_PART_MURAM) { | |
2806 | out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr, | |
2807 | (u32) immrbar_virt_to_phys(ugeth-> | |
2808 | p_rx_bd_ring[i])); | |
2809 | } | |
2810 | /* rest of fields handled by QE */ | |
2811 | } | |
2812 | ||
2813 | /* remoder */ | |
2814 | /* Already has speed set */ | |
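/* OR in the remaining RX mode (remoder) feature bits before writing the register below. */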
2815 | ||
2816 | if (ugeth->rx_extended_features) | |
2817 | remoder |= REMODER_RX_EXTENDED_FEATURES; | |
2818 | if (ug_info->rxExtendedFiltering) | |
2819 | remoder |= REMODER_RX_EXTENDED_FILTERING; | |
2820 | if (ug_info->dynamicMaxFrameLength) | |
2821 | remoder |= REMODER_DYNAMIC_MAX_FRAME_LENGTH; | |
2822 | if (ug_info->dynamicMinFrameLength) | |
2823 | remoder |= REMODER_DYNAMIC_MIN_FRAME_LENGTH; | |
2824 | remoder |= | |
2825 | ug_info->vlanOperationTagged << REMODER_VLAN_OPERATION_TAGGED_SHIFT; | |
2826 | remoder |= | |
2827 | ug_info-> | |
2828 | vlanOperationNonTagged << REMODER_VLAN_OPERATION_NON_TAGGED_SHIFT; | |
2829 | remoder |= ug_info->rxQoSMode << REMODER_RX_QOS_MODE_SHIFT; | |
2830 | remoder |= ((ug_info->numQueuesRx - 1) << REMODER_NUM_OF_QUEUES_SHIFT); | |
2831 | if (ug_info->ipCheckSumCheck) | |
2832 | remoder |= REMODER_IP_CHECKSUM_CHECK; | |
2833 | if (ug_info->ipAddressAlignment) | |
2834 | remoder |= REMODER_IP_ADDRESS_ALIGNMENT; | |
2835 | out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder); | |
2836 | ||
2837 | /* Note that this function must be called */ |
2838 | /* ONLY AFTER p_tx_fw_statistics_pram */ |
2839 | /* and p_UccGethRxFirmwareStatisticsPram are allocated! */ |
2840 | init_firmware_statistics_gathering_mode((ug_info-> | |
2841 | statisticsMode & | |
2842 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX), | |
2843 | (ug_info->statisticsMode & | |
2844 | UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX), | |
2845 | &ugeth->p_tx_glbl_pram->txrmonbaseptr, | |
2846 | ugeth->tx_fw_statistics_pram_offset, | |
2847 | &ugeth->p_rx_glbl_pram->rxrmonbaseptr, | |
2848 | ugeth->rx_fw_statistics_pram_offset, | |
2849 | &ugeth->p_tx_glbl_pram->temoder, | |
2850 | &ugeth->p_rx_glbl_pram->remoder); | |
2851 | ||
2852 | /* function code register */ | |
6fee40e9 | 2853 | out_8(&ugeth->p_rx_glbl_pram->rstate, function_code); |
ce973b14 LY |
2854 | |
2855 | /* initialize extended filtering */ | |
2856 | if (ug_info->rxExtendedFiltering) { | |
2857 | if (!ug_info->extendedFilteringChainPointer) { | |
890de95e LY |
2858 | if (netif_msg_ifup(ugeth)) |
2859 | ugeth_err("%s: Null Extended Filtering Chain Pointer.", | |
b39d66a8 | 2860 | __func__); |
ce973b14 LY |
2861 | return -EINVAL; |
2862 | } | |
2863 | ||
2864 | /* Allocate memory for extended filtering Mode Global | |
2865 | Parameters */ | |
2866 | ugeth->exf_glbl_param_offset = | |
18a8e864 | 2867 | qe_muram_alloc(sizeof(struct ucc_geth_exf_global_pram), |
ce973b14 | 2868 | UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT); |
4c35630c | 2869 | if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) { |
890de95e LY |
2870 | if (netif_msg_ifup(ugeth)) |
2871 | ugeth_err | |
2872 | ("%s: Can not allocate DPRAM memory for" | |
b39d66a8 | 2873 | " p_exf_glbl_param.", __func__); |
ce973b14 LY |
2874 | return -ENOMEM; |
2875 | } | |
2876 | ||
2877 | ugeth->p_exf_glbl_param = | |
6fee40e9 | 2878 | (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth-> |
ce973b14 LY |
2879 | exf_glbl_param_offset); |
2880 | out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam, | |
2881 | ugeth->exf_glbl_param_offset); | |
2882 | out_be32(&ugeth->p_exf_glbl_param->l2pcdptr, | |
2883 | (u32) ug_info->extendedFilteringChainPointer); | |
2884 | ||
2885 | } else { /* initialize 82xx style address filtering */ | |
2886 | ||
2887 | /* Init individual address recognition registers to disabled */ | |
2888 | ||
2889 | for (j = 0; j < NUM_OF_PADDRS; j++) | |
2890 | ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j); | |
2891 | ||
ce973b14 | 2892 | p_82xx_addr_filt = |
6fee40e9 | 2893 | (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth-> |
ce973b14 LY |
2894 | p_rx_glbl_pram->addressfiltering; |
2895 | ||
2896 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | |
2897 | ENET_ADDR_TYPE_GROUP); | |
2898 | ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth, | |
2899 | ENET_ADDR_TYPE_INDIVIDUAL); | |
2900 | } | |
2901 | ||
2902 | /* | |
2903 | * Initialize UCC at QE level | |
2904 | */ | |
2905 | ||
2906 | command = QE_INIT_TX_RX; | |
2907 | ||
2908 | /* Allocate shadow InitEnet command parameter structure. | |
2909 | * This is needed because after the InitEnet command is executed, | |
2910 | * the structure in DPRAM is released, because DPRAM is a premium | |
2911 | * resource. | |
2912 | * This shadow structure keeps a copy of what was done so that the | |
2913 | * allocated resources can be released when the channel is freed. | |
2914 | */ | |
2915 | if (!(ugeth->p_init_enet_param_shadow = | |
04b588d7 | 2916 | kmalloc(sizeof(struct ucc_geth_init_pram), GFP_KERNEL))) { |
890de95e LY |
2917 | if (netif_msg_ifup(ugeth)) |
2918 | ugeth_err | |
2919 | ("%s: Can not allocate memory for" | |
b39d66a8 | 2920 | " p_UccInitEnetParamShadows.", __func__); |
ce973b14 LY |
2921 | return -ENOMEM; |
2922 | } | |
2923 | /* Zero out *p_init_enet_param_shadow */ | |
2924 | memset((char *)ugeth->p_init_enet_param_shadow, | |
18a8e864 | 2925 | 0, sizeof(struct ucc_geth_init_pram)); |
ce973b14 LY |
2926 | |
2927 | /* Fill shadow InitEnet command parameter structure */ | |
2928 | ||
2929 | ugeth->p_init_enet_param_shadow->resinit1 = | |
2930 | ENET_INIT_PARAM_MAGIC_RES_INIT1; | |
2931 | ugeth->p_init_enet_param_shadow->resinit2 = | |
2932 | ENET_INIT_PARAM_MAGIC_RES_INIT2; | |
2933 | ugeth->p_init_enet_param_shadow->resinit3 = | |
2934 | ENET_INIT_PARAM_MAGIC_RES_INIT3; | |
2935 | ugeth->p_init_enet_param_shadow->resinit4 = | |
2936 | ENET_INIT_PARAM_MAGIC_RES_INIT4; | |
2937 | ugeth->p_init_enet_param_shadow->resinit5 = | |
2938 | ENET_INIT_PARAM_MAGIC_RES_INIT5; | |
2939 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | |
2940 | ((u32) ug_info->numThreadsRx) << ENET_INIT_PARAM_RGF_SHIFT; | |
2941 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | |
2942 | ((u32) ug_info->numThreadsTx) << ENET_INIT_PARAM_TGF_SHIFT; | |
2943 | ||
2944 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal |= | |
2945 | ugeth->rx_glbl_pram_offset | ug_info->riscRx; | |
2946 | if ((ug_info->largestexternallookupkeysize != | |
2947 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_NONE) | |
2948 | && (ug_info->largestexternallookupkeysize != | |
2949 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES) | |
2950 | && (ug_info->largestexternallookupkeysize != | |
2951 | QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { | |
890de95e LY |
2952 | if (netif_msg_ifup(ugeth)) |
2953 | ugeth_err("%s: Invalid largest External Lookup Key Size.", | |
b39d66a8 | 2954 | __func__); |
ce973b14 LY |
2955 | return -EINVAL; |
2956 | } | |
2957 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize = | |
2958 | ug_info->largestexternallookupkeysize; | |
18a8e864 | 2959 | size = sizeof(struct ucc_geth_thread_rx_pram); |
ce973b14 LY |
2960 | if (ug_info->rxExtendedFiltering) { |
2961 | size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; | |
2962 | if (ug_info->largestexternallookupkeysize == | |
2963 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) | |
2964 | size += | |
2965 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; | |
2966 | if (ug_info->largestexternallookupkeysize == | |
2967 | QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) | |
2968 | size += | |
2969 | THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; | |
2970 | } | |
2971 | ||
2972 | if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth-> | |
2973 | p_init_enet_param_shadow->rxthread[0]), | |
2974 | (u8) (numThreadsRxNumerical + 1) | |
2975 | /* Rx needs one extra for terminator */ | |
2976 | , size, UCC_GETH_THREAD_RX_PRAM_ALIGNMENT, | |
2977 | ug_info->riscRx, 1)) != 0) { | |
890de95e LY |
2978 | if (netif_msg_ifup(ugeth)) |
2979 | ugeth_err("%s: Can not fill p_init_enet_param_shadow.", | |
b39d66a8 | 2980 | __func__); |
ce973b14 LY |
2981 | return ret_val; |
2982 | } | |
2983 | ||
2984 | ugeth->p_init_enet_param_shadow->txglobal = | |
2985 | ugeth->tx_glbl_pram_offset | ug_info->riscTx; | |
2986 | if ((ret_val = | |
2987 | fill_init_enet_entries(ugeth, | |
2988 | &(ugeth->p_init_enet_param_shadow-> | |
2989 | txthread[0]), numThreadsTxNumerical, | |
18a8e864 | 2990 | sizeof(struct ucc_geth_thread_tx_pram), |
ce973b14 LY |
2991 | UCC_GETH_THREAD_TX_PRAM_ALIGNMENT, |
2992 | ug_info->riscTx, 0)) != 0) { | |
890de95e LY |
2993 | if (netif_msg_ifup(ugeth)) |
2994 | ugeth_err("%s: Can not fill p_init_enet_param_shadow.", | |
b39d66a8 | 2995 | __func__); |
ce973b14 LY |
2996 | return ret_val; |
2997 | } | |
2998 | ||
2999 | /* Load Rx bds with buffers */ | |
3000 | for (i = 0; i < ug_info->numQueuesRx; i++) { | |
3001 | if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { | |
890de95e LY |
3002 | if (netif_msg_ifup(ugeth)) |
3003 | ugeth_err("%s: Can not fill Rx bds with buffers.", | |
b39d66a8 | 3004 | __func__); |
ce973b14 LY |
3005 | return ret_val; |
3006 | } | |
3007 | } | |
3008 | ||
3009 | /* Allocate InitEnet command parameter structure */ | |
18a8e864 | 3010 | init_enet_pram_offset = qe_muram_alloc(sizeof(struct ucc_geth_init_pram), 4); |
4c35630c | 3011 | if (IS_ERR_VALUE(init_enet_pram_offset)) { |
890de95e LY |
3012 | if (netif_msg_ifup(ugeth)) |
3013 | ugeth_err | |
3014 | ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", | |
b39d66a8 | 3015 | __func__); |
ce973b14 LY |
3016 | return -ENOMEM; |
3017 | } | |
3018 | p_init_enet_pram = | |
6fee40e9 | 3019 | (struct ucc_geth_init_pram __iomem *) qe_muram_addr(init_enet_pram_offset); |
ce973b14 LY |
3020 | |
3021 | /* Copy shadow InitEnet command parameter structure into PRAM */ | |
6fee40e9 AF |
3022 | out_8(&p_init_enet_pram->resinit1, |
3023 | ugeth->p_init_enet_param_shadow->resinit1); | |
3024 | out_8(&p_init_enet_pram->resinit2, | |
3025 | ugeth->p_init_enet_param_shadow->resinit2); | |
3026 | out_8(&p_init_enet_pram->resinit3, | |
3027 | ugeth->p_init_enet_param_shadow->resinit3); | |
3028 | out_8(&p_init_enet_pram->resinit4, | |
3029 | ugeth->p_init_enet_param_shadow->resinit4); | |
ce973b14 LY |
3030 | out_be16(&p_init_enet_pram->resinit5, |
3031 | ugeth->p_init_enet_param_shadow->resinit5); | |
6fee40e9 AF |
3032 | out_8(&p_init_enet_pram->largestexternallookupkeysize, |
3033 | ugeth->p_init_enet_param_shadow->largestexternallookupkeysize); | |
ce973b14 LY |
3034 | out_be32(&p_init_enet_pram->rgftgfrxglobal, |
3035 | ugeth->p_init_enet_param_shadow->rgftgfrxglobal); | |
3036 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_RX; i++) | |
3037 | out_be32(&p_init_enet_pram->rxthread[i], | |
3038 | ugeth->p_init_enet_param_shadow->rxthread[i]); | |
3039 | out_be32(&p_init_enet_pram->txglobal, | |
3040 | ugeth->p_init_enet_param_shadow->txglobal); | |
3041 | for (i = 0; i < ENET_INIT_PARAM_MAX_ENTRIES_TX; i++) | |
3042 | out_be32(&p_init_enet_pram->txthread[i], | |
3043 | ugeth->p_init_enet_param_shadow->txthread[i]); | |
3044 | ||
3045 | /* Issue QE command */ | |
3046 | cecr_subblock = | |
3047 | ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num); | |
18a8e864 | 3048 | qe_issue_cmd(command, cecr_subblock, QE_CR_PROTOCOL_ETHERNET, |
ce973b14 LY |
3049 | init_enet_pram_offset); |
3050 | ||
3051 | /* Free InitEnet command parameter */ | |
3052 | qe_muram_free(init_enet_pram_offset); | |
3053 | ||
3054 | return 0; | |
3055 | } | |
3056 | ||
ce973b14 LY |
3057 | /* This is called by the kernel when a frame is ready for transmission. */ |
3058 | /* It is pointed to by the dev->hard_start_xmit function pointer */ | |
3059 | static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev) | |
3060 | { | |
18a8e864 | 3061 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
d5b9049d MR |
3062 | #ifdef CONFIG_UGETH_TX_ON_DEMAND |
3063 | struct ucc_fast_private *uccf; | |
3064 | #endif | |
6fee40e9 | 3065 | u8 __iomem *bd; /* BD pointer */ |
ce973b14 LY |
3066 | u32 bd_status; |
3067 | u8 txQ = 0; | |
3068 | ||
b39d66a8 | 3069 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 LY |
3070 | |
3071 | spin_lock_irq(&ugeth->lock); | |
3072 | ||
09f75cd7 | 3073 | dev->stats.tx_bytes += skb->len; |
ce973b14 LY |
3074 | |
3075 | /* Start from the next BD that should be filled */ | |
3076 | bd = ugeth->txBd[txQ]; | |
6fee40e9 | 3077 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3078 | /* Save the skb pointer so we can free it later */ |
3079 | ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb; | |
3080 | ||
3081 | /* Update the current skb pointer (wrapping if this was the last) */ | |
3082 | ugeth->skb_curtx[txQ] = | |
3083 | (ugeth->skb_curtx[txQ] + | |
3084 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | |
3085 | ||
3086 | /* set up the buffer descriptor */ | |
6fee40e9 | 3087 | out_be32(&((struct qe_bd __iomem *)bd)->buf, |
7f80202b AF |
3088 | dma_map_single(&ugeth->dev->dev, skb->data, |
3089 | skb->len, DMA_TO_DEVICE)); | |
ce973b14 | 3090 | |
18a8e864 | 3091 | /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ |
ce973b14 LY |
3092 | |
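/* Preserve the wrap bit, mark the BD ready (T_R), request a Tx interrupt (T_I), flag it as last (T_L), and set the frame length. */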
3093 | bd_status = (bd_status & T_W) | T_R | T_I | T_L | skb->len; | |
3094 | ||
18a8e864 | 3095 | /* set bd status and length */ |
6fee40e9 | 3096 | out_be32((u32 __iomem *)bd, bd_status); |
ce973b14 LY |
3097 | |
3098 | dev->trans_start = jiffies; | |
3099 | ||
3100 | /* Move to next BD in the ring */ | |
3101 | if (!(bd_status & T_W)) | |
a394f013 | 3102 | bd += sizeof(struct qe_bd); |
ce973b14 | 3103 | else |
a394f013 | 3104 | bd = ugeth->p_tx_bd_ring[txQ]; |
ce973b14 LY |
3105 | |
3106 | /* If the next BD still needs to be cleaned up, then the bds | |
3107 | are full. We need to tell the kernel to stop sending us stuff. */ | |
3108 | if (bd == ugeth->confBd[txQ]) { | |
3109 | if (!netif_queue_stopped(dev)) | |
3110 | netif_stop_queue(dev); | |
3111 | } | |
3112 | ||
a394f013 LY |
3113 | ugeth->txBd[txQ] = bd; |
3114 | ||
ce973b14 LY |
3115 | if (ugeth->p_scheduler) { |
3116 | ugeth->cpucount[txQ]++; | |
3117 | /* Indicate to QE that there are more Tx bds ready for | |
3118 | transmission */ | |
3119 | /* This is done by writing a running counter of the bd | |
3120 | count to the scheduler PRAM. */ | |
3121 | out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]); | |
3122 | } | |
3123 | ||
d5b9049d MR |
3124 | #ifdef CONFIG_UGETH_TX_ON_DEMAND |
3125 | uccf = ugeth->uccf; | |
3126 | out_be16(uccf->p_utodr, UCC_FAST_TOD); | |
3127 | #endif | |
ce973b14 LY |
3128 | spin_unlock_irq(&ugeth->lock); |
3129 | ||
6f6881b8 | 3130 | return 0; |
ce973b14 LY |
3131 | } |
3132 | ||
18a8e864 | 3133 | static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit) |
ce973b14 LY |
3134 | { |
3135 | struct sk_buff *skb; | |
6fee40e9 | 3136 | u8 __iomem *bd; |
ce973b14 LY |
3137 | u16 length, howmany = 0; |
3138 | u32 bd_status; | |
3139 | u8 *bdBuffer; | |
4b8fdefa | 3140 | struct net_device *dev; |
ce973b14 | 3141 | |
b39d66a8 | 3142 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3143 | |
88a15f2e EM |
3144 | dev = ugeth->dev; |
3145 | ||
ce973b14 LY |
3146 | /* collect received buffers */ |
3147 | bd = ugeth->rxBd[rxQ]; | |
3148 | ||
6fee40e9 | 3149 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3150 | |
3151 | /* while there are received buffers (BD not empty, i.e. R_E clear) and within the work limit */ |
3152 | while (!((bd_status & (R_E)) || (--rx_work_limit < 0))) { | |
6fee40e9 | 3153 | bdBuffer = (u8 *) in_be32(&((struct qe_bd __iomem *)bd)->buf); |
ce973b14 LY |
3154 | length = (u16) ((bd_status & BD_LENGTH_MASK) - 4); |
3155 | skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]]; | |
3156 | ||
3157 | /* determine whether buffer is first, last, first and last | |
3158 | (single buffer frame) or middle (not first and not last) */ | |
3159 | if (!skb || | |
3160 | (!(bd_status & (R_F | R_L))) || | |
3161 | (bd_status & R_ERRORS_FATAL)) { | |
890de95e LY |
3162 | if (netif_msg_rx_err(ugeth)) |
3163 | ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", | |
b39d66a8 | 3164 | __func__, __LINE__, (u32) skb); |
ce973b14 LY |
3165 | if (skb) |
3166 | dev_kfree_skb_any(skb); | |
3167 | ||
3168 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; | |
09f75cd7 | 3169 | dev->stats.rx_dropped++; |
ce973b14 | 3170 | } else { |
09f75cd7 | 3171 | dev->stats.rx_packets++; |
ce973b14 LY |
3172 | howmany++; |
3173 | ||
3174 | /* Prep the skb for the packet */ | |
3175 | skb_put(skb, length); | |
3176 | ||
3177 | /* Tell the skb what kind of packet this is */ | |
3178 | skb->protocol = eth_type_trans(skb, ugeth->dev); | |
3179 | ||
09f75cd7 | 3180 | dev->stats.rx_bytes += length; |
ce973b14 | 3181 | /* Send the packet up the stack */ |
ce973b14 | 3182 | netif_receive_skb(skb); |
ce973b14 LY |
3183 | } |
3184 | ||
ce973b14 LY |
3185 | skb = get_new_skb(ugeth, bd); |
3186 | if (!skb) { | |
890de95e | 3187 | if (netif_msg_rx_err(ugeth)) |
b39d66a8 | 3188 | ugeth_warn("%s: No Rx Data Buffer", __func__); |
09f75cd7 | 3189 | dev->stats.rx_dropped++; |
ce973b14 LY |
3190 | break; |
3191 | } | |
3192 | ||
3193 | ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb; | |
3194 | ||
3195 | /* update to point at the next skb */ | |
3196 | ugeth->skb_currx[rxQ] = | |
3197 | (ugeth->skb_currx[rxQ] + | |
3198 | 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]); | |
3199 | ||
3200 | if (bd_status & R_W) | |
3201 | bd = ugeth->p_rx_bd_ring[rxQ]; | |
3202 | else | |
18a8e864 | 3203 | bd += sizeof(struct qe_bd); |
ce973b14 | 3204 | |
6fee40e9 | 3205 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3206 | } |
3207 | ||
3208 | ugeth->rxBd[rxQ] = bd; | |
ce973b14 LY |
3209 | return howmany; |
3210 | } | |
3211 | ||
3212 | static int ucc_geth_tx(struct net_device *dev, u8 txQ) | |
3213 | { | |
3214 | /* Reclaim BDs whose transmission has completed, starting from the confirmation pointer */ |
18a8e864 | 3215 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
6fee40e9 | 3216 | u8 __iomem *bd; /* BD pointer */ |
ce973b14 LY |
3217 | u32 bd_status; |
3218 | ||
3219 | bd = ugeth->confBd[txQ]; | |
6fee40e9 | 3220 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 LY |
3221 | |
3222 | /* Normal processing. */ | |
3223 | while ((bd_status & T_R) == 0) { | |
3224 | /* BD contains already transmitted buffer. */ | |
3225 | /* Handle the transmitted buffer and release */ | |
3226 | /* the BD to be used with the current frame */ | |
3227 | ||
a394f013 | 3228 | if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) |
ce973b14 LY |
3229 | break; |
3230 | ||
09f75cd7 | 3231 | dev->stats.tx_packets++; |
ce973b14 LY |
3232 | |
3233 | /* Free the sk buffer associated with this TxBD */ | |
3234 | dev_kfree_skb_irq(ugeth-> | |
3235 | tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]); | |
3236 | ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; | |
3237 | ugeth->skb_dirtytx[txQ] = | |
3238 | (ugeth->skb_dirtytx[txQ] + | |
3239 | 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]); | |
3240 | ||
3241 | /* We freed a buffer, so now we can restart transmission */ | |
3242 | if (netif_queue_stopped(dev)) | |
3243 | netif_wake_queue(dev); | |
3244 | ||
3245 | /* Advance the confirmation BD pointer */ | |
3246 | if (!(bd_status & T_W)) | |
a394f013 | 3247 | bd += sizeof(struct qe_bd); |
ce973b14 | 3248 | else |
a394f013 | 3249 | bd = ugeth->p_tx_bd_ring[txQ]; |
6fee40e9 | 3250 | bd_status = in_be32((u32 __iomem *)bd); |
ce973b14 | 3251 | } |
a394f013 | 3252 | ugeth->confBd[txQ] = bd; |
ce973b14 LY |
3253 | return 0; |
3254 | } | |
3255 | ||
bea3348e | 3256 | static int ucc_geth_poll(struct napi_struct *napi, int budget) |
ce973b14 | 3257 | { |
bea3348e | 3258 | struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi); |
702ff12c | 3259 | struct ucc_geth_info *ug_info; |
bea3348e | 3260 | int howmany, i; |
ce973b14 | 3261 | |
702ff12c MR |
3262 | ug_info = ugeth->ug_info; |
3263 | ||
702ff12c | 3264 | howmany = 0; |
bea3348e SH |
3265 | for (i = 0; i < ug_info->numQueuesRx; i++) |
3266 | howmany += ucc_geth_rx(ugeth, i, budget - howmany); | |
702ff12c | 3267 | |
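/* Finishing under budget means all pending Rx work is done: leave polling mode and re-enable the Rx event interrupts masked off in the IRQ handler. */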
bea3348e | 3268 | if (howmany < budget) { |
288379f0 | 3269 | napi_complete(napi); |
3bc53427 | 3270 | setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS); |
702ff12c | 3271 | } |
ce973b14 | 3272 | |
bea3348e | 3273 | return howmany; |
ce973b14 | 3274 | } |
ce973b14 | 3275 | |
7d12e780 | 3276 | static irqreturn_t ucc_geth_irq_handler(int irq, void *info) |
ce973b14 | 3277 | { |
06efcad0 | 3278 | struct net_device *dev = info; |
18a8e864 LY |
3279 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
3280 | struct ucc_fast_private *uccf; | |
3281 | struct ucc_geth_info *ug_info; | |
702ff12c MR |
3282 | register u32 ucce; |
3283 | register u32 uccm; | |
702ff12c MR |
3284 | register u32 tx_mask; |
3285 | u8 i; | |
ce973b14 | 3286 | |
b39d66a8 | 3287 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3288 | |
ce973b14 LY |
3289 | uccf = ugeth->uccf; |
3290 | ug_info = ugeth->ug_info; | |
3291 | ||
702ff12c MR |
3292 | /* read and clear events */ |
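/* Handle only events that are currently enabled in the mask, and acknowledge them by writing them back to the event register. */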
3293 | ucce = (u32) in_be32(uccf->p_ucce); | |
3294 | uccm = (u32) in_be32(uccf->p_uccm); | |
3295 | ucce &= uccm; | |
3296 | out_be32(uccf->p_ucce, ucce); | |
ce973b14 | 3297 | |
702ff12c MR |
3298 | /* check for receive events that require processing */ |
3299 | if (ucce & UCCE_RX_EVENTS) { | |
288379f0 | 3300 | if (napi_schedule_prep(&ugeth->napi)) { |
bea3348e | 3301 | uccm &= ~UCCE_RX_EVENTS; |
702ff12c | 3302 | out_be32(uccf->p_uccm, uccm); |
288379f0 | 3303 | __napi_schedule(&ugeth->napi); |
702ff12c | 3304 | } |
702ff12c | 3305 | } |
ce973b14 | 3306 | |
702ff12c MR |
3307 | /* Tx event processing */ |
3308 | if (ucce & UCCE_TX_EVENTS) { | |
3309 | spin_lock(&ugeth->lock); | |
3bc53427 | 3310 | tx_mask = UCC_GETH_UCCE_TXB0; |
ce973b14 LY |
3311 | for (i = 0; i < ug_info->numQueuesTx; i++) { |
3312 | if (ucce & tx_mask) | |
3313 | ucc_geth_tx(dev, i); | |
3314 | ucce &= ~tx_mask; | |
3315 | tx_mask <<= 1; | |
3316 | } | |
702ff12c MR |
3317 | spin_unlock(&ugeth->lock); |
3318 | } | |
ce973b14 | 3319 | |
702ff12c MR |
3320 | /* Errors and other events */ |
3321 | if (ucce & UCCE_OTHER) { | |
3bc53427 | 3322 | if (ucce & UCC_GETH_UCCE_BSY) |
09f75cd7 | 3323 | dev->stats.rx_errors++; |
3bc53427 | 3324 | if (ucce & UCC_GETH_UCCE_TXE) |
09f75cd7 | 3325 | dev->stats.tx_errors++; |
ce973b14 | 3326 | } |
ce973b14 LY |
3327 | |
3328 | return IRQ_HANDLED; | |
3329 | } | |
3330 | ||
26d29ea7 AV |
3331 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3332 | /* | |
3333 | * Polling 'interrupt' - used by things like netconsole to send skbs | |
3334 | * without having to re-enable interrupts. It's not called while | |
3335 | * the interrupt routine is executing. | |
3336 | */ | |
3337 | static void ucc_netpoll(struct net_device *dev) | |
3338 | { | |
3339 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3340 | int irq = ugeth->ug_info->uf_info.irq; | |
3341 | ||
3342 | disable_irq(irq); | |
3343 | ucc_geth_irq_handler(irq, dev); | |
3344 | enable_irq(irq); | |
3345 | } | |
3346 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | |
3347 | ||
ce973b14 LY |
3348 | /* Called when something needs to use the ethernet device */ |
3349 | /* Returns 0 for success. */ | |
3350 | static int ucc_geth_open(struct net_device *dev) | |
3351 | { | |
18a8e864 | 3352 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
ce973b14 LY |
3353 | int err; |
3354 | ||
b39d66a8 | 3355 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 LY |
3356 | |
3357 | /* Test station address */ | |
3358 | if (dev->dev_addr[0] & ENET_GROUP_ADDR) { | |
890de95e LY |
3359 | if (netif_msg_ifup(ugeth)) |
3360 | ugeth_err("%s: Multicast address used for station address" | |
b39d66a8 | 3361 | " - is this what you wanted?", __func__); |
ce973b14 LY |
3362 | return -EINVAL; |
3363 | } | |
3364 | ||
728de4c9 KP |
3365 | err = ucc_struct_init(ugeth); |
3366 | if (err) { | |
890de95e LY |
3367 | if (netif_msg_ifup(ugeth)) |
3368 | ugeth_err("%s: Cannot configure internal struct, aborting.", dev->name); | |
3e73fc9a | 3369 | goto out_err_stop; |
728de4c9 KP |
3370 | } |
3371 | ||
bea3348e | 3372 | napi_enable(&ugeth->napi); |
1a342d22 | 3373 | |
ce973b14 LY |
3374 | err = ucc_geth_startup(ugeth); |
3375 | if (err) { | |
890de95e LY |
3376 | if (netif_msg_ifup(ugeth)) |
3377 | ugeth_err("%s: Cannot configure net device, aborting.", | |
3378 | dev->name); | |
bea3348e | 3379 | goto out_err; |
ce973b14 LY |
3380 | } |
3381 | ||
3382 | err = adjust_enet_interface(ugeth); | |
3383 | if (err) { | |
890de95e LY |
3384 | if (netif_msg_ifup(ugeth)) |
3385 | ugeth_err("%s: Cannot configure net device, aborting.", | |
3386 | dev->name); | |
bea3348e | 3387 | goto out_err; |
ce973b14 LY |
3388 | } |
3389 | ||
3390 | /* Set MACSTNADDR1, MACSTNADDR2 */ | |
3391 | /* For more details see the hardware spec. */ | |
3392 | init_mac_station_addr_regs(dev->dev_addr[0], | |
3393 | dev->dev_addr[1], | |
3394 | dev->dev_addr[2], | |
3395 | dev->dev_addr[3], | |
3396 | dev->dev_addr[4], | |
3397 | dev->dev_addr[5], | |
3398 | &ugeth->ug_regs->macstnaddr1, | |
3399 | &ugeth->ug_regs->macstnaddr2); | |
3400 | ||
3401 | err = init_phy(dev); | |
3402 | if (err) { | |
890de95e LY |
3403 | if (netif_msg_ifup(ugeth)) |
3404 | ugeth_err("%s: Cannot initialize PHY, aborting.", dev->name); | |
bea3348e | 3405 | goto out_err; |
ce973b14 | 3406 | } |
728de4c9 KP |
3407 | |
3408 | phy_start(ugeth->phydev); | |
3409 | ||
67c2fb8f | 3410 | err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); |
ce973b14 | 3411 | if (err) { |
890de95e | 3412 | if (netif_msg_ifup(ugeth)) |
67c2fb8f | 3413 | ugeth_err("%s: Cannot enable net device, aborting.", dev->name); |
bea3348e | 3414 | goto out_err; |
ce973b14 | 3415 | } |
ce973b14 | 3416 | |
67c2fb8f AV |
3417 | err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler, |
3418 | 0, "UCC Geth", dev); | |
ce973b14 | 3419 | if (err) { |
890de95e | 3420 | if (netif_msg_ifup(ugeth)) |
67c2fb8f AV |
3421 | ugeth_err("%s: Cannot get IRQ for net device, aborting.", |
3422 | dev->name); | |
bea3348e | 3423 | goto out_err; |
ce973b14 LY |
3424 | } |
3425 | ||
3426 | netif_start_queue(dev); | |
3427 | ||
3428 | return err; | |
bea3348e SH |
3429 | |
3430 | out_err: | |
bea3348e | 3431 | napi_disable(&ugeth->napi); |
3e73fc9a | 3432 | out_err_stop: |
ba574696 | 3433 | ucc_geth_stop(ugeth); |
bea3348e | 3434 | return err; |
ce973b14 LY |
3435 | } |
3436 | ||
3437 | /* Stops the kernel queue, and halts the controller */ | |
3438 | static int ucc_geth_close(struct net_device *dev) | |
3439 | { | |
18a8e864 | 3440 | struct ucc_geth_private *ugeth = netdev_priv(dev); |
ce973b14 | 3441 | |
b39d66a8 | 3442 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3443 | |
bea3348e | 3444 | napi_disable(&ugeth->napi); |
bea3348e | 3445 | |
ce973b14 LY |
3446 | ucc_geth_stop(ugeth); |
3447 | ||
67c2fb8f AV |
3448 | free_irq(ugeth->ug_info->uf_info.irq, ugeth->dev); |
3449 | ||
728de4c9 KP |
3450 | phy_disconnect(ugeth->phydev); |
3451 | ugeth->phydev = NULL; | |
ce973b14 LY |
3452 | |
3453 | netif_stop_queue(dev); | |
3454 | ||
3455 | return 0; | |
3456 | } | |
3457 | ||
fdb614c2 AV |
3458 | /* Reopen device. This will reset the MAC and PHY. */ |
3459 | static void ucc_geth_timeout_work(struct work_struct *work) | |
3460 | { | |
3461 | struct ucc_geth_private *ugeth; | |
3462 | struct net_device *dev; | |
3463 | ||
3464 | ugeth = container_of(work, struct ucc_geth_private, timeout_work); | |
3465 | dev = ugeth->dev; | |
3466 | ||
3467 | ugeth_vdbg("%s: IN", __func__); | |
3468 | ||
3469 | dev->stats.tx_errors++; | |
3470 | ||
3471 | ugeth_dump_regs(ugeth); | |
3472 | ||
3473 | if (dev->flags & IFF_UP) { | |
3474 | /* | |
3475 | * Must reset MAC *and* PHY. This is done by reopening | |
3476 | * the device. | |
3477 | */ | |
3478 | ucc_geth_close(dev); | |
3479 | ucc_geth_open(dev); | |
3480 | } | |
3481 | ||
3482 | netif_tx_schedule_all(dev); | |
3483 | } | |
3484 | ||
3485 | /* | |
3486 | * ucc_geth_timeout gets called when a packet has not been | |
3487 | * transmitted after a set amount of time. | |
3488 | */ | |
3489 | static void ucc_geth_timeout(struct net_device *dev) | |
3490 | { | |
3491 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3492 | ||
3493 | netif_carrier_off(dev); | |
3494 | schedule_work(&ugeth->timeout_work); | |
3495 | } | |
3496 | ||
4e19b5c1 | 3497 | static phy_interface_t to_phy_interface(const char *phy_connection_type) |
728de4c9 | 3498 | { |
4e19b5c1 | 3499 | if (strcasecmp(phy_connection_type, "mii") == 0) |
728de4c9 | 3500 | return PHY_INTERFACE_MODE_MII; |
4e19b5c1 | 3501 | if (strcasecmp(phy_connection_type, "gmii") == 0) |
728de4c9 | 3502 | return PHY_INTERFACE_MODE_GMII; |
4e19b5c1 | 3503 | if (strcasecmp(phy_connection_type, "tbi") == 0) |
728de4c9 | 3504 | return PHY_INTERFACE_MODE_TBI; |
4e19b5c1 | 3505 | if (strcasecmp(phy_connection_type, "rmii") == 0) |
728de4c9 | 3506 | return PHY_INTERFACE_MODE_RMII; |
4e19b5c1 | 3507 | if (strcasecmp(phy_connection_type, "rgmii") == 0) |
728de4c9 | 3508 | return PHY_INTERFACE_MODE_RGMII; |
4e19b5c1 | 3509 | if (strcasecmp(phy_connection_type, "rgmii-id") == 0) |
728de4c9 | 3510 | return PHY_INTERFACE_MODE_RGMII_ID; |
bd0ceaab KP |
3511 | if (strcasecmp(phy_connection_type, "rgmii-txid") == 0) |
3512 | return PHY_INTERFACE_MODE_RGMII_TXID; | |
3513 | if (strcasecmp(phy_connection_type, "rgmii-rxid") == 0) | |
3514 | return PHY_INTERFACE_MODE_RGMII_RXID; | |
4e19b5c1 | 3515 | if (strcasecmp(phy_connection_type, "rtbi") == 0) |
728de4c9 KP |
3516 | return PHY_INTERFACE_MODE_RTBI; |
3517 | ||
3518 | return PHY_INTERFACE_MODE_MII; | |
3519 | } | |
3520 | ||
18a8e864 | 3521 | static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *match) |
ce973b14 | 3522 | { |
18a8e864 LY |
3523 | struct device *device = &ofdev->dev; |
3524 | struct device_node *np = ofdev->node; | |
728de4c9 | 3525 | struct device_node *mdio; |
ce973b14 LY |
3526 | struct net_device *dev = NULL; |
3527 | struct ucc_geth_private *ugeth = NULL; | |
3528 | struct ucc_geth_info *ug_info; | |
18a8e864 LY |
3529 | struct resource res; |
3530 | struct device_node *phy; | |
728de4c9 | 3531 | int err, ucc_num, max_speed = 0; |
18a8e864 | 3532 | const phandle *ph; |
3d137fdd | 3533 | const u32 *fixed_link; |
18a8e864 | 3534 | const unsigned int *prop; |
9fb1e350 | 3535 | const char *sprop; |
9b4c7a4e | 3536 | const void *mac_addr; |
728de4c9 KP |
3537 | phy_interface_t phy_interface; |
3538 | static const int enet_to_speed[] = { | |
3539 | SPEED_10, SPEED_10, SPEED_10, | |
3540 | SPEED_100, SPEED_100, SPEED_100, | |
3541 | SPEED_1000, SPEED_1000, SPEED_1000, SPEED_1000, | |
3542 | }; | |
3543 | static const phy_interface_t enet_to_phy_interface[] = { | |
3544 | PHY_INTERFACE_MODE_MII, PHY_INTERFACE_MODE_RMII, | |
3545 | PHY_INTERFACE_MODE_RGMII, PHY_INTERFACE_MODE_MII, | |
3546 | PHY_INTERFACE_MODE_RMII, PHY_INTERFACE_MODE_RGMII, | |
3547 | PHY_INTERFACE_MODE_GMII, PHY_INTERFACE_MODE_RGMII, | |
3548 | PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, | |
3549 | }; | |
ce973b14 | 3550 | |
b39d66a8 | 3551 | ugeth_vdbg("%s: IN", __func__); |
ce973b14 | 3552 | |
56626f33 AV |
3553 | prop = of_get_property(np, "cell-index", NULL); |
3554 | if (!prop) { | |
3555 | prop = of_get_property(np, "device-id", NULL); | |
3556 | if (!prop) | |
3557 | return -ENODEV; | |
3558 | } | |
3559 | ||
18a8e864 LY |
3560 | ucc_num = *prop - 1; |
3561 | if ((ucc_num < 0) || (ucc_num > 7)) | |
3562 | return -ENODEV; | |
3563 | ||
3564 | ug_info = &ugeth_info[ucc_num]; | |
890de95e LY |
3565 | if (ug_info == NULL) { |
3566 | if (netif_msg_probe(&debug)) | |
3567 | ugeth_err("%s: [%d] Missing additional data!", | |
b39d66a8 | 3568 | __func__, ucc_num); |
890de95e LY |
3569 | return -ENODEV; |
3570 | } | |
3571 | ||
18a8e864 | 3572 | ug_info->uf_info.ucc_num = ucc_num; |
728de4c9 | 3573 | |
9fb1e350 TT |
3574 | sprop = of_get_property(np, "rx-clock-name", NULL); |
3575 | if (sprop) { | |
3576 | ug_info->uf_info.rx_clock = qe_clock_source(sprop); | |
3577 | if ((ug_info->uf_info.rx_clock < QE_CLK_NONE) || | |
3578 | (ug_info->uf_info.rx_clock > QE_CLK24)) { | |
3579 | printk(KERN_ERR | |
3580 | "ucc_geth: invalid rx-clock-name property\n"); | |
3581 | return -EINVAL; | |
3582 | } | |
3583 | } else { | |
3584 | prop = of_get_property(np, "rx-clock", NULL); | |
3585 | if (!prop) { | |
3586 | /* If both rx-clock-name and rx-clock are missing, | |
3587 | we want to tell people to use rx-clock-name. */ | |
3588 | printk(KERN_ERR | |
3589 | "ucc_geth: missing rx-clock-name property\n"); | |
3590 | return -EINVAL; | |
3591 | } | |
3592 | if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { | |
3593 | printk(KERN_ERR | |
3594 | "ucc_geth: invalid rx-clock propperty\n"); | |
3595 | return -EINVAL; | |
3596 | } | |
3597 | ug_info->uf_info.rx_clock = *prop; | |
3598 | } | |
3599 | ||
3600 | sprop = of_get_property(np, "tx-clock-name", NULL); | |
3601 | if (sprop) { | |
3602 | ug_info->uf_info.tx_clock = qe_clock_source(sprop); | |
3603 | if ((ug_info->uf_info.tx_clock < QE_CLK_NONE) || | |
3604 | (ug_info->uf_info.tx_clock > QE_CLK24)) { | |
3605 | printk(KERN_ERR | |
3606 | "ucc_geth: invalid tx-clock-name property\n"); | |
3607 | return -EINVAL; | |
3608 | } | |
3609 | } else { | |
e410553f | 3610 | prop = of_get_property(np, "tx-clock", NULL); |
9fb1e350 TT |
3611 | if (!prop) { |
3612 | printk(KERN_ERR | |
3613 | "ucc_geth: mising tx-clock-name property\n"); | |
3614 | return -EINVAL; | |
3615 | } | |
3616 | if ((*prop < QE_CLK_NONE) || (*prop > QE_CLK24)) { | |
3617 | printk(KERN_ERR | |
3618 | "ucc_geth: invalid tx-clock property\n"); | |
3619 | return -EINVAL; | |
3620 | } | |
3621 | ug_info->uf_info.tx_clock = *prop; | |
3622 | } | |
3623 | ||
18a8e864 LY |
3624 | err = of_address_to_resource(np, 0, &res); |
3625 | if (err) | |
3626 | return -EINVAL; | |
3627 | ||
3628 | ug_info->uf_info.regs = res.start; | |
3629 | ug_info->uf_info.irq = irq_of_parse_and_map(np, 0); | |
3d137fdd JT |
3630 | fixed_link = of_get_property(np, "fixed-link", NULL); |
3631 | if (fixed_link) { | |
f38d1008 | 3632 | snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "0"); |
3d137fdd JT |
3633 | ug_info->phy_address = fixed_link[0]; |
3634 | phy = NULL; | |
3635 | } else { | |
3636 | ph = of_get_property(np, "phy-handle", NULL); | |
3637 | phy = of_find_node_by_phandle(*ph); | |
18a8e864 | 3638 | |
3d137fdd JT |
3639 | if (phy == NULL) |
3640 | return -ENODEV; | |
ce973b14 | 3641 | |
3d137fdd JT |
3642 | /* set the PHY address */ |
3643 | prop = of_get_property(phy, "reg", NULL); | |
3644 | if (prop == NULL) | |
3645 | return -1; | |
3646 | ug_info->phy_address = *prop; | |
3647 | ||
3648 | /* Set the bus id */ | |
3649 | mdio = of_get_parent(phy); | |
3650 | ||
3651 | if (mdio == NULL) | |
3652 | return -1; | |
18a8e864 | 3653 | |
3d137fdd JT |
3654 | err = of_address_to_resource(mdio, 0, &res); |
3655 | of_node_put(mdio); | |
3656 | ||
3657 | if (err) | |
3658 | return -1; | |
3659 | ||
9d9326d3 | 3660 | snprintf(ug_info->mdio_bus, MII_BUS_ID_SIZE, "%x", res.start); |
3d137fdd | 3661 | } |
728de4c9 KP |
3662 | |
3663 | /* get the phy interface type, or default to MII */ | |
4e19b5c1 | 3664 | prop = of_get_property(np, "phy-connection-type", NULL); |
728de4c9 KP |
3665 | if (!prop) { |
3666 | /* handle interface property present in old trees */ | |
40cd3a45 | 3667 | prop = of_get_property(phy, "interface", NULL); |
4e19b5c1 | 3668 | if (prop != NULL) { |
728de4c9 | 3669 | phy_interface = enet_to_phy_interface[*prop]; |
4e19b5c1 KP |
3670 | max_speed = enet_to_speed[*prop]; |
3671 | } else | |
728de4c9 KP |
3672 | phy_interface = PHY_INTERFACE_MODE_MII; |
3673 | } else { | |
3674 | phy_interface = to_phy_interface((const char *)prop); | |
3675 | } | |
3676 | ||
4e19b5c1 KP |
3677 | /* get speed, or derive from PHY interface */ |
3678 | if (max_speed == 0) | |
728de4c9 KP |
3679 | switch (phy_interface) { |
3680 | case PHY_INTERFACE_MODE_GMII: | |
3681 | case PHY_INTERFACE_MODE_RGMII: | |
3682 | case PHY_INTERFACE_MODE_RGMII_ID: | |
bd0ceaab KP |
3683 | case PHY_INTERFACE_MODE_RGMII_RXID: |
3684 | case PHY_INTERFACE_MODE_RGMII_TXID: | |
728de4c9 KP |
3685 | case PHY_INTERFACE_MODE_TBI: |
3686 | case PHY_INTERFACE_MODE_RTBI: | |
3687 | max_speed = SPEED_1000; | |
3688 | break; | |
3689 | default: | |
3690 | max_speed = SPEED_100; | |
3691 | break; | |
3692 | } | |
728de4c9 KP |
3693 | |
3694 | if (max_speed == SPEED_1000) { | |
4e19b5c1 | 3695 | /* configure muram FIFOs for gigabit operation */ |
728de4c9 KP |
3696 | ug_info->uf_info.urfs = UCC_GETH_URFS_GIGA_INIT; |
3697 | ug_info->uf_info.urfet = UCC_GETH_URFET_GIGA_INIT; | |
3698 | ug_info->uf_info.urfset = UCC_GETH_URFSET_GIGA_INIT; | |
3699 | ug_info->uf_info.utfs = UCC_GETH_UTFS_GIGA_INIT; | |
3700 | ug_info->uf_info.utfet = UCC_GETH_UTFET_GIGA_INIT; | |
3701 | ug_info->uf_info.utftt = UCC_GETH_UTFTT_GIGA_INIT; | |
ffea31ed JT |
3702 | ug_info->numThreadsTx = UCC_GETH_NUM_OF_THREADS_4; |
3703 | ug_info->numThreadsRx = UCC_GETH_NUM_OF_THREADS_4; | |
728de4c9 KP |
3704 | } |
3705 | ||
890de95e LY |
3706 | if (netif_msg_probe(&debug)) |
3707 | printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", | |
3708 | ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, | |
3709 | ug_info->uf_info.irq); | |
ce973b14 | 3710 | |
ce973b14 LY |
3711 | /* Create an ethernet device instance */ |
3712 | dev = alloc_etherdev(sizeof(*ugeth)); | |
3713 | ||
3714 | if (dev == NULL) | |
3715 | return -ENOMEM; | |
3716 | ||
3717 | ugeth = netdev_priv(dev); | |
3718 | spin_lock_init(&ugeth->lock); | |
3719 | ||
80a9fad8 AV |
3720 | /* Create CQs for hash tables */ |
3721 | INIT_LIST_HEAD(&ugeth->group_hash_q); | |
3722 | INIT_LIST_HEAD(&ugeth->ind_hash_q); | |
3723 | ||
ce973b14 LY |
3724 | dev_set_drvdata(device, dev); |
3725 | ||
3726 | /* Set the dev->base_addr to the gfar reg region */ | |
3727 | dev->base_addr = (unsigned long)(ug_info->uf_info.regs); | |
3728 | ||
ce973b14 LY |
3729 | SET_NETDEV_DEV(dev, device); |
3730 | ||
3731 | /* Fill in the dev structure */ | |
ac421852 | 3732 | uec_set_ethtool_ops(dev); |
ce973b14 LY |
3733 | dev->open = ucc_geth_open; |
3734 | dev->hard_start_xmit = ucc_geth_start_xmit; | |
3735 | dev->tx_timeout = ucc_geth_timeout; | |
3736 | dev->watchdog_timeo = TX_TIMEOUT; | |
1762a29a | 3737 | INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work); |
bea3348e | 3738 | netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, UCC_GETH_DEV_WEIGHT); |
26d29ea7 AV |
3739 | #ifdef CONFIG_NET_POLL_CONTROLLER |
3740 | dev->poll_controller = ucc_netpoll; | |
3741 | #endif | |
ce973b14 | 3742 | dev->stop = ucc_geth_close; |
ce973b14 LY |
3743 | // dev->change_mtu = ucc_geth_change_mtu; |
3744 | dev->mtu = 1500; | |
3745 | dev->set_multicast_list = ucc_geth_set_multi; | |
ce973b14 | 3746 | |
890de95e | 3747 | ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT); |
728de4c9 KP |
3748 | ugeth->phy_interface = phy_interface; |
3749 | ugeth->max_speed = max_speed; | |
3750 | ||
ce973b14 LY |
3751 | err = register_netdev(dev); |
3752 | if (err) { | |
890de95e LY |
3753 | if (netif_msg_probe(ugeth)) |
3754 | ugeth_err("%s: Cannot register net device, aborting.", | |
3755 | dev->name); | |
ce973b14 LY |
3756 | free_netdev(dev); |
3757 | return err; | |
3758 | } | |
3759 | ||
e9eb70c9 | 3760 | mac_addr = of_get_mac_address(np); |
9b4c7a4e LY |
3761 | if (mac_addr) |
3762 | memcpy(dev->dev_addr, mac_addr, 6); | |
ce973b14 | 3763 | |
728de4c9 KP |
3764 | ugeth->ug_info = ug_info; |
3765 | ugeth->dev = dev; | |
b1c4a9dd | 3766 | ugeth->node = np; |
728de4c9 | 3767 | |
ce973b14 LY |
3768 | return 0; |
3769 | } | |
3770 | ||
18a8e864 | 3771 | static int ucc_geth_remove(struct of_device* ofdev) |
ce973b14 | 3772 | { |
18a8e864 | 3773 | struct device *device = &ofdev->dev; |
ce973b14 LY |
3774 | struct net_device *dev = dev_get_drvdata(device); |
3775 | struct ucc_geth_private *ugeth = netdev_priv(dev); | |
3776 | ||
80a9fad8 | 3777 | unregister_netdev(dev); |
ce973b14 | 3778 | free_netdev(dev); |
80a9fad8 AV |
3779 | ucc_geth_memclean(ugeth); |
3780 | dev_set_drvdata(device, NULL); | |
ce973b14 LY |
3781 | |
3782 | return 0; | |
3783 | } | |
3784 | ||
18a8e864 LY |
3785 | static struct of_device_id ucc_geth_match[] = { |
3786 | { | |
3787 | .type = "network", | |
3788 | .compatible = "ucc_geth", | |
3789 | }, | |
3790 | {}, | |
3791 | }; | |
3792 | ||
3793 | MODULE_DEVICE_TABLE(of, ucc_geth_match); | |
3794 | ||
3795 | static struct of_platform_driver ucc_geth_driver = { | |
3796 | .name = DRV_NAME, | |
3797 | .match_table = ucc_geth_match, | |
3798 | .probe = ucc_geth_probe, | |
3799 | .remove = ucc_geth_remove, | |
ce973b14 LY |
3800 | }; |
3801 | ||
3802 | static int __init ucc_geth_init(void) | |
3803 | { | |
728de4c9 KP |
3804 | int i, ret; |
3805 | ||
3806 | ret = uec_mdio_init(); | |
3807 | ||
3808 | if (ret) | |
3809 | return ret; | |
18a8e864 | 3810 | |
890de95e LY |
3811 | if (netif_msg_drv(&debug)) |
3812 | printk(KERN_INFO "ucc_geth: " DRV_DESC "\n"); | |
ce973b14 LY |
3813 | for (i = 0; i < 8; i++) |
3814 | memcpy(&(ugeth_info[i]), &ugeth_primary_info, | |
3815 | sizeof(ugeth_primary_info)); | |
3816 | ||
728de4c9 KP |
3817 | ret = of_register_platform_driver(&ucc_geth_driver); |
3818 | ||
3819 | if (ret) | |
3820 | uec_mdio_exit(); | |
3821 | ||
3822 | return ret; | |
ce973b14 LY |
3823 | } |
3824 | ||
3825 | static void __exit ucc_geth_exit(void) | |
3826 | { | |
a4f0c2ca | 3827 | of_unregister_platform_driver(&ucc_geth_driver); |
728de4c9 | 3828 | uec_mdio_exit(); |
ce973b14 LY |
3829 | } |
3830 | ||
3831 | module_init(ucc_geth_init); | |
3832 | module_exit(ucc_geth_exit); | |
3833 | ||
3834 | MODULE_AUTHOR("Freescale Semiconductor, Inc"); | |
3835 | MODULE_DESCRIPTION(DRV_DESC); | |
c2bcf00b | 3836 | MODULE_VERSION(DRV_VERSION); |
ce973b14 | 3837 | MODULE_LICENSE("GPL"); |