/*
 * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
 *
 * 2005-2010 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
 * available in the GRLIB VHDL IP core library.
 *
 * Full documentation of both cores can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * The Gigabit version supports scatter/gather DMA, any alignment of
 * buffers and checksum offloading.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Contributors: Kristoffer Glembo
 *               Daniel Hellstrom
 *               Marko Isomaki
 */

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/io.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/byteorder.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#endif

#include "greth.h"

#define GRETH_DEF_MSG_ENABLE      \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

static int greth_debug = -1;    /* -1 == use GRETH_DEF_MSG_ENABLE as value */
module_param(greth_debug, int, 0);
MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");

/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
static int macaddr[6];
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");

static int greth_edcl = 1;
module_param(greth_edcl, int, 0);
MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");

static int greth_open(struct net_device *dev);
static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev);
static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
                                         struct net_device *dev);
static int greth_rx(struct net_device *dev, int limit);
static int greth_rx_gbit(struct net_device *dev, int limit);
static void greth_clean_tx(struct net_device *dev);
static void greth_clean_tx_gbit(struct net_device *dev);
static irqreturn_t greth_interrupt(int irq, void *dev_id);
static int greth_close(struct net_device *dev);
static int greth_set_mac_add(struct net_device *dev, void *p);
static void greth_set_multicast_list(struct net_device *dev);

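/* Register access helpers: the GRETH control registers are big-endian, so
 * values are byte-swapped around the non-barriered __raw_readl()/__raw_writel()
 * accessors; callers insert explicit barriers where ordering matters.
 */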
#define GRETH_REGLOAD(a)        (be32_to_cpu(__raw_readl(&(a))))
#define GRETH_REGSAVE(a, v)     (__raw_writel(cpu_to_be32(v), &(a)))
#define GRETH_REGORIN(a, v)     (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
#define GRETH_REGANDIN(a, v)    (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))

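/* Descriptor ring index helpers; the ring sizes are powers of two, so the
 * index wraps with a simple mask.
 */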
#define NEXT_TX(N)      (((N) + 1) & GRETH_TXBD_NUM_MASK)
#define SKIP_TX(N, C)   (((N) + C) & GRETH_TXBD_NUM_MASK)
#define NEXT_RX(N)      (((N) + 1) & GRETH_RXBD_NUM_MASK)

static void greth_print_rx_packet(void *addr, int len)
{
        print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
                       addr, len, true);
}

static void greth_print_tx_packet(struct sk_buff *skb)
{
        int i;
        int length;

        if (skb_shinfo(skb)->nr_frags == 0)
                length = skb->len;
        else
                length = skb_headlen(skb);

        print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
                       skb->data, length, true);

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {

                print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
                               phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
                               skb_shinfo(skb)->frags[i].page_offset,
                               length, true);
        }
}

static inline void greth_enable_tx(struct greth_private *greth)
{
        wmb();
        GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
}

static inline void greth_disable_tx(struct greth_private *greth)
{
        GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
}

static inline void greth_enable_rx(struct greth_private *greth)
{
        wmb();
        GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
}

static inline void greth_disable_rx(struct greth_private *greth)
{
        GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
}

static inline void greth_enable_irqs(struct greth_private *greth)
{
        GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
}

static inline void greth_disable_irqs(struct greth_private *greth)
{
        GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
}

static inline void greth_write_bd(u32 *bd, u32 val)
{
        __raw_writel(cpu_to_be32(val), bd);
}

static inline u32 greth_read_bd(u32 *bd)
{
        return be32_to_cpu(__raw_readl(bd));
}

static void greth_clean_rings(struct greth_private *greth)
{
        int i;
        struct greth_bd *rx_bdp = greth->rx_bd_base;
        struct greth_bd *tx_bdp = greth->tx_bd_base;

        if (greth->gbit_mac) {

                /* Free and unmap RX buffers */
                for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
                        if (greth->rx_skbuff[i] != NULL) {
                                dev_kfree_skb(greth->rx_skbuff[i]);
                                dma_unmap_single(greth->dev,
                                                 greth_read_bd(&rx_bdp->addr),
                                                 MAX_FRAME_SIZE+NET_IP_ALIGN,
                                                 DMA_FROM_DEVICE);
                        }
                }

                /* TX buffers */
                while (greth->tx_free < GRETH_TXBD_NUM) {

                        struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
                        int nr_frags = skb_shinfo(skb)->nr_frags;
                        tx_bdp = greth->tx_bd_base + greth->tx_last;
                        greth->tx_last = NEXT_TX(greth->tx_last);

                        dma_unmap_single(greth->dev,
                                         greth_read_bd(&tx_bdp->addr),
                                         skb_headlen(skb),
                                         DMA_TO_DEVICE);

                        for (i = 0; i < nr_frags; i++) {
                                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                                tx_bdp = greth->tx_bd_base + greth->tx_last;

                                dma_unmap_page(greth->dev,
                                               greth_read_bd(&tx_bdp->addr),
                                               frag->size,
                                               DMA_TO_DEVICE);

                                greth->tx_last = NEXT_TX(greth->tx_last);
                        }
                        greth->tx_free += nr_frags+1;
                        dev_kfree_skb(skb);
                }


        } else { /* 10/100 Mbps MAC */

                for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
                        kfree(greth->rx_bufs[i]);
                        dma_unmap_single(greth->dev,
                                         greth_read_bd(&rx_bdp->addr),
                                         MAX_FRAME_SIZE,
                                         DMA_FROM_DEVICE);
                }
                for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
                        kfree(greth->tx_bufs[i]);
                        dma_unmap_single(greth->dev,
                                         greth_read_bd(&tx_bdp->addr),
                                         MAX_FRAME_SIZE,
                                         DMA_TO_DEVICE);
                }
        }
}

static int greth_init_rings(struct greth_private *greth)
{
        struct sk_buff *skb;
        struct greth_bd *rx_bd, *tx_bd;
        u32 dma_addr;
        int i;

        rx_bd = greth->rx_bd_base;
        tx_bd = greth->tx_bd_base;

        /* Initialize descriptor rings and buffers */
        if (greth->gbit_mac) {

                for (i = 0; i < GRETH_RXBD_NUM; i++) {
                        skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
                        if (skb == NULL) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Error allocating DMA ring.\n");
                                goto cleanup;
                        }
                        skb_reserve(skb, NET_IP_ALIGN);
                        dma_addr = dma_map_single(greth->dev,
                                                  skb->data,
                                                  MAX_FRAME_SIZE+NET_IP_ALIGN,
                                                  DMA_FROM_DEVICE);

                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
                                goto cleanup;
                        }
                        greth->rx_skbuff[i] = skb;
                        greth_write_bd(&rx_bd[i].addr, dma_addr);
                        greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
                }

        } else {

                /* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
                for (i = 0; i < GRETH_RXBD_NUM; i++) {

                        greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

                        if (greth->rx_bufs[i] == NULL) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Error allocating DMA ring.\n");
                                goto cleanup;
                        }

                        dma_addr = dma_map_single(greth->dev,
                                                  greth->rx_bufs[i],
                                                  MAX_FRAME_SIZE,
                                                  DMA_FROM_DEVICE);

                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
                                goto cleanup;
                        }
                        greth_write_bd(&rx_bd[i].addr, dma_addr);
                        greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
                }
                for (i = 0; i < GRETH_TXBD_NUM; i++) {

                        greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);

                        if (greth->tx_bufs[i] == NULL) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Error allocating DMA ring.\n");
                                goto cleanup;
                        }

                        dma_addr = dma_map_single(greth->dev,
                                                  greth->tx_bufs[i],
                                                  MAX_FRAME_SIZE,
                                                  DMA_TO_DEVICE);

                        if (dma_mapping_error(greth->dev, dma_addr)) {
                                if (netif_msg_ifup(greth))
                                        dev_err(greth->dev, "Could not create initial DMA mapping\n");
                                goto cleanup;
                        }
                        greth_write_bd(&tx_bd[i].addr, dma_addr);
                        greth_write_bd(&tx_bd[i].stat, 0);
                }
        }
        greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
                       greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);

        /* Initialize pointers. */
        greth->rx_cur = 0;
        greth->tx_next = 0;
        greth->tx_last = 0;
        greth->tx_free = GRETH_TXBD_NUM;

        /* Initialize descriptor base address */
        GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
        GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);

        return 0;

cleanup:
        greth_clean_rings(greth);
        return -ENOMEM;
}

static int greth_open(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        int err;

        err = greth_init_rings(greth);
        if (err) {
                if (netif_msg_ifup(greth))
                        dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
                return err;
        }

        err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
        if (err) {
                if (netif_msg_ifup(greth))
                        dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
                greth_clean_rings(greth);
                return err;
        }

        if (netif_msg_ifup(greth))
                dev_dbg(&dev->dev, " starting queue\n");
        netif_start_queue(dev);

        GRETH_REGSAVE(greth->regs->status, 0xFF);

        napi_enable(&greth->napi);

        greth_enable_irqs(greth);
        greth_enable_tx(greth);
        greth_enable_rx(greth);
        return 0;

}

static int greth_close(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);

        napi_disable(&greth->napi);

        greth_disable_irqs(greth);
        greth_disable_tx(greth);
        greth_disable_rx(greth);

        netif_stop_queue(dev);

        free_irq(greth->irq, (void *) dev);

        greth_clean_rings(greth);

        return 0;
}

static netdev_tx_t
greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct greth_bd *bdp;
        int err = NETDEV_TX_OK;
        u32 status, dma_addr, ctrl;
        unsigned long flags;

        /* Clean TX Ring */
        greth_clean_tx(greth->netdev);

        if (unlikely(greth->tx_free <= 0)) {
                spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
                ctrl = GRETH_REGLOAD(greth->regs->control);
                /* Enable TX IRQ only if not already in poll() routine */
                if (ctrl & GRETH_RXI)
                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&greth->devlock, flags);
                return NETDEV_TX_BUSY;
        }

        if (netif_msg_pktdata(greth))
                greth_print_tx_packet(skb);


        if (unlikely(skb->len > MAX_FRAME_SIZE)) {
                dev->stats.tx_errors++;
                goto out;
        }

        bdp = greth->tx_bd_base + greth->tx_next;
        dma_addr = greth_read_bd(&bdp->addr);

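        /* The 10/100 MAC works from a fixed set of preallocated, pre-mapped
         * buffers (unlike the gigabit core it does not handle arbitrarily
         * aligned skb data directly), so the frame is copied before
         * transmission.
         */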
        memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);

        dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);

        status = GRETH_BD_EN | GRETH_BD_IE | (skb->len & GRETH_BD_LEN);

        /* Wrap around descriptor ring */
        if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
                status |= GRETH_BD_WR;
        }

        greth->tx_next = NEXT_TX(greth->tx_next);
        greth->tx_free--;

        /* Write descriptor control word and enable transmission */
        greth_write_bd(&bdp->stat, status);
        spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
        greth_enable_tx(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);

out:
        dev_kfree_skb(skb);
        return err;
}


static netdev_tx_t
greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct greth_bd *bdp;
        u32 status = 0, dma_addr, ctrl;
        int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
        unsigned long flags;

        nr_frags = skb_shinfo(skb)->nr_frags;

        /* Clean TX Ring */
        greth_clean_tx_gbit(dev);

        if (greth->tx_free < nr_frags + 1) {
                spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/
                ctrl = GRETH_REGLOAD(greth->regs->control);
                /* Enable TX IRQ only if not already in poll() routine */
                if (ctrl & GRETH_RXI)
                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI);
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&greth->devlock, flags);
                err = NETDEV_TX_BUSY;
                goto out;
        }

        if (netif_msg_pktdata(greth))
                greth_print_tx_packet(skb);

        if (unlikely(skb->len > MAX_FRAME_SIZE)) {
                dev->stats.tx_errors++;
                goto out;
        }

        /* Save skb pointer. */
        greth->tx_skbuff[greth->tx_next] = skb;

        /* Linear buf */
        if (nr_frags != 0)
                status = GRETH_TXBD_MORE;

        status |= GRETH_TXBD_CSALL;
        status |= skb_headlen(skb) & GRETH_BD_LEN;
        if (greth->tx_next == GRETH_TXBD_NUM_MASK)
                status |= GRETH_BD_WR;

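        /* Note that GRETH_BD_EN is intentionally not set on this head
         * descriptor yet; it is enabled last, after all fragments have been
         * set up, so the MAC never starts on a half-built chain.
         */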
        bdp = greth->tx_bd_base + greth->tx_next;
        greth_write_bd(&bdp->stat, status);
        dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);

        if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
                goto map_error;

        greth_write_bd(&bdp->addr, dma_addr);

        curr_tx = NEXT_TX(greth->tx_next);

        /* Frags */
        for (i = 0; i < nr_frags; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                greth->tx_skbuff[curr_tx] = NULL;
                bdp = greth->tx_bd_base + curr_tx;

                status = GRETH_TXBD_CSALL | GRETH_BD_EN;
                status |= frag->size & GRETH_BD_LEN;

                /* Wrap around descriptor ring */
                if (curr_tx == GRETH_TXBD_NUM_MASK)
                        status |= GRETH_BD_WR;

                /* More fragments left */
                if (i < nr_frags - 1)
                        status |= GRETH_TXBD_MORE;
                else
                        status |= GRETH_BD_IE; /* enable IRQ on last fragment */

                greth_write_bd(&bdp->stat, status);

                dma_addr = dma_map_page(greth->dev,
                                        frag->page,
                                        frag->page_offset,
                                        frag->size,
                                        DMA_TO_DEVICE);

                if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
                        goto frag_map_error;

                greth_write_bd(&bdp->addr, dma_addr);

                curr_tx = NEXT_TX(curr_tx);
        }

        wmb();

        /* Enable the descriptor chain by enabling the first descriptor */
        bdp = greth->tx_bd_base + greth->tx_next;
        greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
        greth->tx_next = curr_tx;
        greth->tx_free -= nr_frags + 1;

        wmb();

        spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/
        greth_enable_tx(greth);
        spin_unlock_irqrestore(&greth->devlock, flags);

        return NETDEV_TX_OK;

frag_map_error:
        /* Unmap SKB mappings that succeeded and disable descriptor */
        for (i = 0; greth->tx_next + i != curr_tx; i++) {
                bdp = greth->tx_bd_base + greth->tx_next + i;
                dma_unmap_single(greth->dev,
                                 greth_read_bd(&bdp->addr),
                                 greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
                                 DMA_TO_DEVICE);
                greth_write_bd(&bdp->stat, 0);
        }
map_error:
        if (net_ratelimit())
                dev_warn(greth->dev, "Could not create TX DMA mapping\n");
        dev_kfree_skb(skb);
out:
        return err;
}

static irqreturn_t greth_interrupt(int irq, void *dev_id)
{
        struct net_device *dev = dev_id;
        struct greth_private *greth;
        u32 status, ctrl;
        irqreturn_t retval = IRQ_NONE;

        greth = netdev_priv(dev);

        spin_lock(&greth->devlock);

        /* Get the interrupt events that caused us to be here. */
        status = GRETH_REGLOAD(greth->regs->status);

        /* Must see if interrupts are enabled also, INT_TX|INT_RX flags may be
         * set regardless of whether IRQ is enabled or not. Especially
         * important when shared IRQ.
         */
        ctrl = GRETH_REGLOAD(greth->regs->control);

        /* Handle rx and tx interrupts through poll */
        if (((status & (GRETH_INT_RE | GRETH_INT_RX)) && (ctrl & GRETH_RXI)) ||
            ((status & (GRETH_INT_TE | GRETH_INT_TX)) && (ctrl & GRETH_TXI))) {
                retval = IRQ_HANDLED;

                /* Disable interrupts and schedule poll() */
                greth_disable_irqs(greth);
                napi_schedule(&greth->napi);
        }

        mmiowb();
        spin_unlock(&greth->devlock);

        return retval;
}

static void greth_clean_tx(struct net_device *dev)
{
        struct greth_private *greth;
        struct greth_bd *bdp;
        u32 stat;

        greth = netdev_priv(dev);

        while (1) {
                bdp = greth->tx_bd_base + greth->tx_last;
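                /* Ack pending TX interrupt status before sampling the
                 * descriptor, so a completion that races with this loop still
                 * raises a fresh interrupt rather than being lost.
                 */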
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
                mb();
                stat = greth_read_bd(&bdp->stat);

                if (unlikely(stat & GRETH_BD_EN))
                        break;

                if (greth->tx_free == GRETH_TXBD_NUM)
                        break;

                /* Check status for errors */
                if (unlikely(stat & GRETH_TXBD_STATUS)) {
                        dev->stats.tx_errors++;
                        if (stat & GRETH_TXBD_ERR_AL)
                                dev->stats.tx_aborted_errors++;
                        if (stat & GRETH_TXBD_ERR_UE)
                                dev->stats.tx_fifo_errors++;
                }
                dev->stats.tx_packets++;
                greth->tx_last = NEXT_TX(greth->tx_last);
                greth->tx_free++;
        }

        if (greth->tx_free > 0) {
                netif_wake_queue(dev);
        }

}

static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
{
        /* Check status for errors */
        if (unlikely(stat & GRETH_TXBD_STATUS)) {
                dev->stats.tx_errors++;
                if (stat & GRETH_TXBD_ERR_AL)
                        dev->stats.tx_aborted_errors++;
                if (stat & GRETH_TXBD_ERR_UE)
                        dev->stats.tx_fifo_errors++;
                if (stat & GRETH_TXBD_ERR_LC)
                        dev->stats.tx_aborted_errors++;
        }
        dev->stats.tx_packets++;
}

static void greth_clean_tx_gbit(struct net_device *dev)
{
        struct greth_private *greth;
        struct greth_bd *bdp, *bdp_last_frag;
        struct sk_buff *skb;
        u32 stat;
        int nr_frags, i;

        greth = netdev_priv(dev);

        while (greth->tx_free < GRETH_TXBD_NUM) {

                skb = greth->tx_skbuff[greth->tx_last];

                nr_frags = skb_shinfo(skb)->nr_frags;

                /* We only clean fully completed SKBs */
                bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);

                GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
                mb();
                stat = greth_read_bd(&bdp_last_frag->stat);

                if (stat & GRETH_BD_EN)
                        break;

                greth->tx_skbuff[greth->tx_last] = NULL;

                greth_update_tx_stats(dev, stat);

                bdp = greth->tx_bd_base + greth->tx_last;

                greth->tx_last = NEXT_TX(greth->tx_last);

                dma_unmap_single(greth->dev,
                                 greth_read_bd(&bdp->addr),
                                 skb_headlen(skb),
                                 DMA_TO_DEVICE);

                for (i = 0; i < nr_frags; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                        bdp = greth->tx_bd_base + greth->tx_last;

                        dma_unmap_page(greth->dev,
                                       greth_read_bd(&bdp->addr),
                                       frag->size,
                                       DMA_TO_DEVICE);

                        greth->tx_last = NEXT_TX(greth->tx_last);
                }
                greth->tx_free += nr_frags+1;
                dev_kfree_skb(skb);
        }

        if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1)))
                netif_wake_queue(dev);
}

static int greth_rx(struct net_device *dev, int limit)
{
        struct greth_private *greth;
        struct greth_bd *bdp;
        struct sk_buff *skb;
        int pkt_len;
        int bad, count;
        u32 status, dma_addr;
        unsigned long flags;

        greth = netdev_priv(dev);

        for (count = 0; count < limit; ++count) {

                bdp = greth->rx_bd_base + greth->rx_cur;
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
                mb();
                status = greth_read_bd(&bdp->stat);

                if (unlikely(status & GRETH_BD_EN)) {
                        break;
                }

                dma_addr = greth_read_bd(&bdp->addr);
                bad = 0;

                /* Check status for errors. */
                if (unlikely(status & GRETH_RXBD_STATUS)) {
                        if (status & GRETH_RXBD_ERR_FT) {
                                dev->stats.rx_length_errors++;
                                bad = 1;
                        }
                        if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
                                dev->stats.rx_frame_errors++;
                                bad = 1;
                        }
                        if (status & GRETH_RXBD_ERR_CRC) {
                                dev->stats.rx_crc_errors++;
                                bad = 1;
                        }
                }
                if (unlikely(bad)) {
                        dev->stats.rx_errors++;

                } else {

                        pkt_len = status & GRETH_BD_LEN;

                        skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);

                        if (unlikely(skb == NULL)) {

                                if (net_ratelimit())
                                        dev_warn(&dev->dev, "low on memory - packet dropped\n");

                                dev->stats.rx_dropped++;

                        } else {
                                skb_reserve(skb, NET_IP_ALIGN);
                                skb->dev = dev;

                                dma_sync_single_for_cpu(greth->dev,
                                                        dma_addr,
                                                        pkt_len,
                                                        DMA_FROM_DEVICE);

                                if (netif_msg_pktdata(greth))
                                        greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);

                                memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);

                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
                                netif_receive_skb(skb);
                        }
                }

                status = GRETH_BD_EN | GRETH_BD_IE;
                if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
                        status |= GRETH_BD_WR;
                }

                wmb();
                greth_write_bd(&bdp->stat, status);

                dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);

                spin_lock_irqsave(&greth->devlock, flags); /* save from XMIT */
                greth_enable_rx(greth);
                spin_unlock_irqrestore(&greth->devlock, flags);

                greth->rx_cur = NEXT_RX(greth->rx_cur);
        }

        return count;
}

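/* Returns non-zero when the hardware has verified the IP/TCP/UDP checksums of
 * a received frame; IP fragments and frames with any checksum error are
 * reported as not checksummed.
 */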
static inline int hw_checksummed(u32 status)
{

        if (status & GRETH_RXBD_IP_FRAG)
                return 0;

        if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
                return 0;

        if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
                return 0;

        if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
                return 0;

        return 1;
}

static int greth_rx_gbit(struct net_device *dev, int limit)
{
        struct greth_private *greth;
        struct greth_bd *bdp;
        struct sk_buff *skb, *newskb;
        int pkt_len;
        int bad, count = 0;
        u32 status, dma_addr;
        unsigned long flags;

        greth = netdev_priv(dev);

        for (count = 0; count < limit; ++count) {

                bdp = greth->rx_bd_base + greth->rx_cur;
                skb = greth->rx_skbuff[greth->rx_cur];
                GRETH_REGSAVE(greth->regs->status, GRETH_INT_RE | GRETH_INT_RX);
                mb();
                status = greth_read_bd(&bdp->stat);
                bad = 0;

                if (status & GRETH_BD_EN)
                        break;

                /* Check status for errors. */
                if (unlikely(status & GRETH_RXBD_STATUS)) {

                        if (status & GRETH_RXBD_ERR_FT) {
                                dev->stats.rx_length_errors++;
                                bad = 1;
                        } else if (status &
                                   (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
                                dev->stats.rx_frame_errors++;
                                bad = 1;
                        } else if (status & GRETH_RXBD_ERR_CRC) {
                                dev->stats.rx_crc_errors++;
                                bad = 1;
                        }
                }

                /* Allocate new skb to replace current, not needed if the
                 * current skb can be reused */
                if (!bad && (newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN))) {
                        skb_reserve(newskb, NET_IP_ALIGN);

                        dma_addr = dma_map_single(greth->dev,
                                                  newskb->data,
                                                  MAX_FRAME_SIZE + NET_IP_ALIGN,
                                                  DMA_FROM_DEVICE);

                        if (!dma_mapping_error(greth->dev, dma_addr)) {
                                /* Process the incoming frame. */
                                pkt_len = status & GRETH_BD_LEN;

                                dma_unmap_single(greth->dev,
                                                 greth_read_bd(&bdp->addr),
                                                 MAX_FRAME_SIZE + NET_IP_ALIGN,
                                                 DMA_FROM_DEVICE);

                                if (netif_msg_pktdata(greth))
                                        greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);

                                skb_put(skb, pkt_len);

                                if (dev->features & NETIF_F_RXCSUM && hw_checksummed(status))
                                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                                else
                                        skb_checksum_none_assert(skb);

                                skb->protocol = eth_type_trans(skb, dev);
                                dev->stats.rx_packets++;
                                netif_receive_skb(skb);

                                greth->rx_skbuff[greth->rx_cur] = newskb;
                                greth_write_bd(&bdp->addr, dma_addr);
                        } else {
                                if (net_ratelimit())
                                        dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
                                dev_kfree_skb(newskb);
                                /* reusing current skb, so it is a drop */
                                dev->stats.rx_dropped++;
                        }
                } else if (bad) {
                        /* Bad Frame transfer, the skb is reused */
                        dev->stats.rx_dropped++;
                } else {
                        /* Failed to allocate a new skb: reuse the current,
                         * already filled skb as if the transfer had failed.
                         * One could argue that RX descriptor handling should
                         * be split into a cleaning and a refilling stage, as
                         * is done on the TX side of the driver.
                         */
                        if (net_ratelimit())
                                dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
                        /* reusing current skb, so it is a drop */
                        dev->stats.rx_dropped++;
                }

                status = GRETH_BD_EN | GRETH_BD_IE;
                if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
                        status |= GRETH_BD_WR;
                }

                wmb();
                greth_write_bd(&bdp->stat, status);
                spin_lock_irqsave(&greth->devlock, flags);
                greth_enable_rx(greth);
                spin_unlock_irqrestore(&greth->devlock, flags);
                greth->rx_cur = NEXT_RX(greth->rx_cur);
        }

        return count;

}

static int greth_poll(struct napi_struct *napi, int budget)
{
        struct greth_private *greth;
        int work_done = 0;
        unsigned long flags;
        u32 mask, ctrl;
        greth = container_of(napi, struct greth_private, napi);

restart_txrx_poll:
        if (netif_queue_stopped(greth->netdev)) {
                if (greth->gbit_mac)
                        greth_clean_tx_gbit(greth->netdev);
                else
                        greth_clean_tx(greth->netdev);
        }

        if (greth->gbit_mac) {
                work_done += greth_rx_gbit(greth->netdev, budget - work_done);
        } else {
                work_done += greth_rx(greth->netdev, budget - work_done);
        }

        if (work_done < budget) {

                spin_lock_irqsave(&greth->devlock, flags);

                ctrl = GRETH_REGLOAD(greth->regs->control);
                if (netif_queue_stopped(greth->netdev)) {
                        GRETH_REGSAVE(greth->regs->control,
                                      ctrl | GRETH_TXI | GRETH_RXI);
                        mask = GRETH_INT_RX | GRETH_INT_RE |
                               GRETH_INT_TX | GRETH_INT_TE;
                } else {
                        GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_RXI);
                        mask = GRETH_INT_RX | GRETH_INT_RE;
                }

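                /* Re-check the status register after re-arming interrupts; if
                 * new work arrived in the window, back out the IRQ enable and
                 * poll again so no events are lost before NAPI completes.
                 */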
                if (GRETH_REGLOAD(greth->regs->status) & mask) {
                        GRETH_REGSAVE(greth->regs->control, ctrl);
                        spin_unlock_irqrestore(&greth->devlock, flags);
                        goto restart_txrx_poll;
                } else {
                        __napi_complete(napi);
                        spin_unlock_irqrestore(&greth->devlock, flags);
                }
        }

        return work_done;
}

static int greth_set_mac_add(struct net_device *dev, void *p)
{
        struct sockaddr *addr = p;
        struct greth_private *greth;
        struct greth_regs *regs;

        greth = netdev_priv(dev);
        regs = (struct greth_regs *) greth->regs;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EINVAL;

        memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

        GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
        GRETH_REGSAVE(regs->esa_lsb,
                      addr->sa_data[2] << 24 | addr->sa_data[3] << 16 |
                      addr->sa_data[4] << 8 | addr->sa_data[5]);
        return 0;
}

static u32 greth_hash_get_index(__u8 *addr)
{
        return (ether_crc(6, addr)) & 0x3F;
}

static void greth_set_hash_filter(struct net_device *dev)
{
        struct netdev_hw_addr *ha;
        struct greth_private *greth = netdev_priv(dev);
        struct greth_regs *regs = (struct greth_regs *) greth->regs;
        u32 mc_filter[2];
        unsigned int bitnr;

        mc_filter[0] = mc_filter[1] = 0;

        netdev_for_each_mc_addr(ha, dev) {
                bitnr = greth_hash_get_index(ha->addr);
                mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
        }

        GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
        GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
}

static void greth_set_multicast_list(struct net_device *dev)
{
        int cfg;
        struct greth_private *greth = netdev_priv(dev);
        struct greth_regs *regs = (struct greth_regs *) greth->regs;

        cfg = GRETH_REGLOAD(regs->control);
        if (dev->flags & IFF_PROMISC)
                cfg |= GRETH_CTRL_PR;
        else
                cfg &= ~GRETH_CTRL_PR;

        if (greth->multicast) {
                if (dev->flags & IFF_ALLMULTI) {
                        GRETH_REGSAVE(regs->hash_msb, -1);
                        GRETH_REGSAVE(regs->hash_lsb, -1);
                        cfg |= GRETH_CTRL_MCEN;
                        GRETH_REGSAVE(regs->control, cfg);
                        return;
                }

                if (netdev_mc_empty(dev)) {
                        cfg &= ~GRETH_CTRL_MCEN;
                        GRETH_REGSAVE(regs->control, cfg);
                        return;
                }

                /* Setup multicast filter */
                greth_set_hash_filter(dev);
                cfg |= GRETH_CTRL_MCEN;
        }
        GRETH_REGSAVE(regs->control, cfg);
}

static u32 greth_get_msglevel(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        return greth->msg_enable;
}

static void greth_set_msglevel(struct net_device *dev, u32 value)
{
        struct greth_private *greth = netdev_priv(dev);
        greth->msg_enable = value;
}

static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phy = greth->phy;

        if (!phy)
                return -ENODEV;

        return phy_ethtool_gset(phy, cmd);
}

static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phy = greth->phy;

        if (!phy)
                return -ENODEV;

        return phy_ethtool_sset(phy, cmd);
}

static int greth_get_regs_len(struct net_device *dev)
{
        return sizeof(struct greth_regs);
}

static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
        struct greth_private *greth = netdev_priv(dev);

        strncpy(info->driver, dev_driver_string(greth->dev), 32);
        strncpy(info->version, "revision: 1.0", 32);
        strncpy(info->bus_info, greth->dev->bus->name, 32);
        strncpy(info->fw_version, "N/A", 32);
        info->eedump_len = 0;
        info->regdump_len = sizeof(struct greth_regs);
}

static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
{
        int i;
        struct greth_private *greth = netdev_priv(dev);
        u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
        u32 *buff = p;

        for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
                buff[i] = greth_read_bd(&greth_regs[i]);
}

static const struct ethtool_ops greth_ethtool_ops = {
        .get_msglevel   = greth_get_msglevel,
        .set_msglevel   = greth_set_msglevel,
        .get_settings   = greth_get_settings,
        .set_settings   = greth_set_settings,
        .get_drvinfo    = greth_get_drvinfo,
        .get_regs_len   = greth_get_regs_len,
        .get_regs       = greth_get_regs,
        .get_link       = ethtool_op_get_link,
};

static struct net_device_ops greth_netdev_ops = {
        .ndo_open               = greth_open,
        .ndo_stop               = greth_close,
        .ndo_start_xmit         = greth_start_xmit,
        .ndo_set_mac_address    = greth_set_mac_add,
        .ndo_validate_addr      = eth_validate_addr,
};

static inline int wait_for_mdio(struct greth_private *greth)
{
        unsigned long timeout = jiffies + 4*HZ/100;
        while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
                if (time_after(jiffies, timeout))
                        return 0;
        }
        return 1;
}

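/* The MDIO register packs the whole transaction into one word: data in bits
 * 31:16, PHY address in 15:11, register address in 10:6, bit 1 starts a read
 * and bit 0 starts a write.
 */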
static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
{
        struct greth_private *greth = bus->priv;
        int data;

        if (!wait_for_mdio(greth))
                return -EBUSY;

        GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);

        if (!wait_for_mdio(greth))
                return -EBUSY;

        if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
                data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
                return data;

        } else {
                return -1;
        }
}

static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
        struct greth_private *greth = bus->priv;

        if (!wait_for_mdio(greth))
                return -EBUSY;

        GRETH_REGSAVE(greth->regs->mdio,
                      ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);

        if (!wait_for_mdio(greth))
                return -EBUSY;

        return 0;
}

static int greth_mdio_reset(struct mii_bus *bus)
{
        return 0;
}

static void greth_link_change(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phydev = greth->phy;
        unsigned long flags;
        int status_change = 0;
        u32 ctrl;

        spin_lock_irqsave(&greth->devlock, flags);

        if (phydev->link) {

                if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
                        ctrl = GRETH_REGLOAD(greth->regs->control) &
                               ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB);

                        if (phydev->duplex)
                                ctrl |= GRETH_CTRL_FD;

                        if (phydev->speed == SPEED_100)
                                ctrl |= GRETH_CTRL_SP;
                        else if (phydev->speed == SPEED_1000)
                                ctrl |= GRETH_CTRL_GB;

                        GRETH_REGSAVE(greth->regs->control, ctrl);
                        greth->speed = phydev->speed;
                        greth->duplex = phydev->duplex;
                        status_change = 1;
                }
        }

        if (phydev->link != greth->link) {
                if (!phydev->link) {
                        greth->speed = 0;
                        greth->duplex = -1;
                }
                greth->link = phydev->link;

                status_change = 1;
        }

        spin_unlock_irqrestore(&greth->devlock, flags);

        if (status_change) {
                if (phydev->link)
                        pr_debug("%s: link up (%d/%s)\n",
                                 dev->name, phydev->speed,
                                 DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
                else
                        pr_debug("%s: link down\n", dev->name);
        }
}

static int greth_mdio_probe(struct net_device *dev)
{
        struct greth_private *greth = netdev_priv(dev);
        struct phy_device *phy = NULL;
        int ret;

        /* Find the first PHY */
        phy = phy_find_first(greth->mdio);

        if (!phy) {
                if (netif_msg_probe(greth))
                        dev_err(&dev->dev, "no PHY found\n");
                return -ENXIO;
        }

        ret = phy_connect_direct(dev, phy, &greth_link_change,
                                 0, greth->gbit_mac ?
                                 PHY_INTERFACE_MODE_GMII :
                                 PHY_INTERFACE_MODE_MII);
        if (ret) {
                if (netif_msg_ifup(greth))
                        dev_err(&dev->dev, "could not attach to PHY\n");
                return ret;
        }

        if (greth->gbit_mac)
                phy->supported &= PHY_GBIT_FEATURES;
        else
                phy->supported &= PHY_BASIC_FEATURES;

        phy->advertising = phy->supported;

        greth->link = 0;
        greth->speed = 0;
        greth->duplex = -1;
        greth->phy = phy;

        return 0;
}

static inline int phy_aneg_done(struct phy_device *phydev)
{
        int retval;

        retval = phy_read(phydev, MII_BMSR);

        return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
}

static int greth_mdio_init(struct greth_private *greth)
{
        int ret, phy;
        unsigned long timeout;

        greth->mdio = mdiobus_alloc();
        if (!greth->mdio) {
                return -ENOMEM;
        }

        greth->mdio->name = "greth-mdio";
        snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
        greth->mdio->read = greth_mdio_read;
        greth->mdio->write = greth_mdio_write;
        greth->mdio->reset = greth_mdio_reset;
        greth->mdio->priv = greth;

        greth->mdio->irq = greth->mdio_irqs;

        for (phy = 0; phy < PHY_MAX_ADDR; phy++)
                greth->mdio->irq[phy] = PHY_POLL;

        ret = mdiobus_register(greth->mdio);
        if (ret) {
                goto error;
        }

        ret = greth_mdio_probe(greth->netdev);
        if (ret) {
                if (netif_msg_probe(greth))
                        dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
                goto unreg_mdio;
        }

        phy_start(greth->phy);

        /* If Ethernet debug link is used make autoneg happen right away */
        if (greth->edcl && greth_edcl == 1) {
                phy_start_aneg(greth->phy);
                timeout = jiffies + 6*HZ;
                while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
                }
                genphy_read_status(greth->phy);
                greth_link_change(greth->netdev);
        }

        return 0;

unreg_mdio:
        mdiobus_unregister(greth->mdio);
error:
        mdiobus_free(greth->mdio);
        return ret;
}

/* Initialize the GRETH MAC */
static int __devinit greth_of_probe(struct platform_device *ofdev)
{
        struct net_device *dev;
        struct greth_private *greth;
        struct greth_regs *regs;

        int i;
        int err;
        int tmp;
        unsigned long timeout;

        dev = alloc_etherdev(sizeof(struct greth_private));

        if (dev == NULL)
                return -ENOMEM;

        greth = netdev_priv(dev);
        greth->netdev = dev;
        greth->dev = &ofdev->dev;

        if (greth_debug > 0)
                greth->msg_enable = greth_debug;
        else
                greth->msg_enable = GRETH_DEF_MSG_ENABLE;

        spin_lock_init(&greth->devlock);

        greth->regs = of_ioremap(&ofdev->resource[0], 0,
                                 resource_size(&ofdev->resource[0]),
                                 "grlib-greth regs");

        if (greth->regs == NULL) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "ioremap failure.\n");
                err = -EIO;
                goto error1;
        }

        regs = (struct greth_regs *) greth->regs;
        greth->irq = ofdev->archdata.irqs[0];

        dev_set_drvdata(greth->dev, dev);
        SET_NETDEV_DEV(dev, greth->dev);

        if (netif_msg_probe(greth))
                dev_dbg(greth->dev, "resetting controller.\n");

        /* Reset the controller. */
        GRETH_REGSAVE(regs->control, GRETH_RESET);

        /* Wait for MAC to reset itself */
        timeout = jiffies + HZ/100;
        while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
                if (time_after(jiffies, timeout)) {
                        err = -EIO;
                        if (netif_msg_probe(greth))
                                dev_err(greth->dev, "timeout when waiting for reset.\n");
                        goto error2;
                }
        }

        /* Get default PHY address */
        greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;

        /* Check if we have GBIT capable MAC */
        tmp = GRETH_REGLOAD(regs->control);
        greth->gbit_mac = (tmp >> 27) & 1;

        /* Check for multicast capability */
        greth->multicast = (tmp >> 25) & 1;

        greth->edcl = (tmp >> 31) & 1;

        /* If we have EDCL we disable the EDCL speed-duplex FSM so
         * it doesn't interfere with the software */
        if (greth->edcl != 0)
                GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);

        /* Check if MAC can handle MDIO interrupts */
        greth->mdio_int_en = (tmp >> 26) & 1;

        err = greth_mdio_init(greth);
        if (err) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "failed to register MDIO bus\n");
                goto error2;
        }

        /* Allocate TX descriptor ring in coherent memory */
        greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
                                                                   1024,
                                                                   &greth->tx_bd_base_phys,
                                                                   GFP_KERNEL);

        if (!greth->tx_bd_base) {
                if (netif_msg_probe(greth))
                        dev_err(&dev->dev, "could not allocate descriptor memory.\n");
                err = -ENOMEM;
                goto error3;
        }

        memset(greth->tx_bd_base, 0, 1024);

        /* Allocate RX descriptor ring in coherent memory */
        greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
                                                                   1024,
                                                                   &greth->rx_bd_base_phys,
                                                                   GFP_KERNEL);

        if (!greth->rx_bd_base) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "could not allocate descriptor memory.\n");
                err = -ENOMEM;
                goto error4;
        }

        memset(greth->rx_bd_base, 0, 1024);

        /* Get MAC address from: module param, OF property or ID prom */
        for (i = 0; i < 6; i++) {
                if (macaddr[i] != 0)
                        break;
        }
        if (i == 6) {
                const unsigned char *addr;
                int len;
                addr = of_get_property(ofdev->dev.of_node, "local-mac-address",
                                       &len);
                if (addr != NULL && len == 6) {
                        for (i = 0; i < 6; i++)
                                macaddr[i] = (unsigned int) addr[i];
                } else {
#ifdef CONFIG_SPARC
                        for (i = 0; i < 6; i++)
                                macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
#endif
                }
        }

        for (i = 0; i < 6; i++)
                dev->dev_addr[i] = macaddr[i];

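        /* Bump the last byte so the next GRETH instance probed with the same
         * module-level macaddr[] gets a distinct address (assumed intent of
         * the increment below).
         */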
        macaddr[5]++;

        if (!is_valid_ether_addr(&dev->dev_addr[0])) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "no valid ethernet address, aborting.\n");
                err = -EINVAL;
                goto error5;
        }

        GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
        GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
                      dev->dev_addr[4] << 8 | dev->dev_addr[5]);

        /* Clear all pending interrupts except PHY irq */
        GRETH_REGSAVE(regs->status, 0xFF);

        if (greth->gbit_mac) {
                dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
                        NETIF_F_RXCSUM;
                dev->features = dev->hw_features | NETIF_F_HIGHDMA;
                greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
        }

        if (greth->multicast) {
                greth_netdev_ops.ndo_set_multicast_list = greth_set_multicast_list;
                dev->flags |= IFF_MULTICAST;
        } else {
                dev->flags &= ~IFF_MULTICAST;
        }

        dev->netdev_ops = &greth_netdev_ops;
        dev->ethtool_ops = &greth_ethtool_ops;

        err = register_netdev(dev);
        if (err) {
                if (netif_msg_probe(greth))
                        dev_err(greth->dev, "netdevice registration failed.\n");
                goto error5;
        }

        /* setup NAPI */
        netif_napi_add(dev, &greth->napi, greth_poll, 64);

        return 0;

error5:
        dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
error4:
        dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
error3:
        mdiobus_unregister(greth->mdio);
error2:
        of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
error1:
        free_netdev(dev);
        return err;
}

static int __devexit greth_of_remove(struct platform_device *of_dev)
{
        struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
        struct greth_private *greth = netdev_priv(ndev);

        /* Free descriptor areas */
        dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);

        dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);

        dev_set_drvdata(&of_dev->dev, NULL);

        if (greth->phy)
                phy_stop(greth->phy);
        mdiobus_unregister(greth->mdio);

        unregister_netdev(ndev);
        free_netdev(ndev);

        of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));

        return 0;
}

static struct of_device_id greth_of_match[] = {
        {
                .name = "GAISLER_ETHMAC",
        },
        {
                .name = "01_01d",
        },
        {},
};

MODULE_DEVICE_TABLE(of, greth_of_match);

static struct platform_driver greth_of_driver = {
        .driver = {
                .name = "grlib-greth",
                .owner = THIS_MODULE,
                .of_match_table = greth_of_match,
        },
        .probe = greth_of_probe,
        .remove = __devexit_p(greth_of_remove),
};

static int __init greth_init(void)
{
        return platform_driver_register(&greth_of_driver);
}

static void __exit greth_cleanup(void)
{
        platform_driver_unregister(&greth_of_driver);
}

module_init(greth_init);
module_exit(greth_cleanup);

MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
MODULE_LICENSE("GPL");