/*
 * rrunner.c: Linux driver for the Essential RoadRunner HIPPI board.
 *
 * Copyright (C) 1998-2002 by Jes Sorensen, <[email protected]>.
 *
 * Thanks to Essential Communication for providing us with hardware
 * and very comprehensive documentation without which I would not have
 * been able to write this driver. A special thank you to John Gibbon
 * for sorting out the legal issues, with the NDA, allowing the code to
 * be released under the GPL.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Thanks to Jayaram Bhat from ODS/Essential for fixing some of the
 * stupid bugs in my code.
 *
 * Softnet support and various other patches from Val Henson of
 * ODS/Essential.
 *
 * PCI DMA mapping code partly based on work by Francois Romieu.
 */


#define DEBUG 1
#define RX_DMA_SKBUFF 1
#define PKT_COPY_THRESHOLD 512

#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/hippidevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>

#define rr_if_busy(dev)     netif_queue_stopped(dev)
#define rr_if_running(dev)  netif_running(dev)

#include "rrunner.h"

#define RUN_AT(x) (jiffies + (x))


MODULE_AUTHOR("Jes Sorensen <[email protected]>");
MODULE_DESCRIPTION("Essential RoadRunner HIPPI driver");
MODULE_LICENSE("GPL");

static char version[] __devinitdata = "rrunner.c: v0.50 11/11/2002 Jes Sorensen ([email protected])\n";


static const struct net_device_ops rr_netdev_ops = {
	.ndo_open		= rr_open,
	.ndo_stop		= rr_close,
	.ndo_do_ioctl		= rr_ioctl,
	.ndo_start_xmit		= rr_start_xmit,
	.ndo_change_mtu		= hippi_change_mtu,
	.ndo_set_mac_address	= hippi_mac_addr,
};

/*
 * Implementation notes:
 *
 * The DMA engine only allows for DMA within physical 64KB chunks of
 * memory. The current approach of the driver (and stack) is to use
 * linear blocks of memory for the skbuffs. However, as the data block
 * is always the first part of the skb and skbs are 2^n aligned, we
 * are guaranteed to get the whole block within one 64KB-aligned
 * 64KB chunk.
 *
 * In the long term, relying on being able to allocate 64KB linear
 * chunks of memory is not feasible and the skb handling code and the
 * stack will need to know about I/O vectors or something similar.
 */

static int __devinit rr_init_one(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
{
	struct net_device *dev;
	static int version_disp;
	u8 pci_latency;
	struct rr_private *rrpriv;
	void *tmpptr;
	dma_addr_t ring_dma;
	int ret = -ENOMEM;

	dev = alloc_hippi_dev(sizeof(struct rr_private));
	if (!dev)
		goto out3;

	ret = pci_enable_device(pdev);
	if (ret) {
		ret = -ENODEV;
		goto out2;
	}

	rrpriv = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	if (pci_request_regions(pdev, "rrunner")) {
		ret = -EIO;
		goto out;
	}

	pci_set_drvdata(pdev, dev);

	rrpriv->pci_dev = pdev;

	spin_lock_init(&rrpriv->lock);

	dev->irq = pdev->irq;
	dev->netdev_ops = &rr_netdev_ops;

	dev->base_addr = pci_resource_start(pdev, 0);

	/* display version info if adapter is found */
	if (!version_disp) {
		/* set display flag to TRUE so that */
		/* we only display this string ONCE */
		version_disp = 1;
		printk(version);
	}

	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
	if (pci_latency <= 0x58){
		pci_latency = 0x58;
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, pci_latency);
	}

	pci_set_master(pdev);

	printk(KERN_INFO "%s: Essential RoadRunner serial HIPPI "
	       "at 0x%08lx, irq %i, PCI latency %i\n", dev->name,
	       dev->base_addr, dev->irq, pci_latency);

	/*
	 * Remap the regs into kernel space.
	 */

	rrpriv->regs = ioremap(dev->base_addr, 0x1000);

	if (!rrpriv->regs){
		printk(KERN_ERR "%s: Unable to map I/O register, "
		       "RoadRunner will be disabled.\n", dev->name);
		ret = -EIO;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma);
	rrpriv->tx_ring = tmpptr;
	rrpriv->tx_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma);
	rrpriv->rx_ring = tmpptr;
	rrpriv->rx_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	tmpptr = pci_alloc_consistent(pdev, EVT_RING_SIZE, &ring_dma);
	rrpriv->evt_ring = tmpptr;
	rrpriv->evt_ring_dma = ring_dma;

	if (!tmpptr) {
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * Don't access any register before this point!
	 */
#ifdef __BIG_ENDIAN
	writel(readl(&rrpriv->regs->HostCtrl) | NO_SWAP,
	       &rrpriv->regs->HostCtrl);
#endif
	/*
	 * Need to add a case for little-endian 64-bit hosts here.
	 */

	rr_init(dev);

	dev->base_addr = 0;

	ret = register_netdev(dev);
	if (ret)
		goto out;
	return 0;

 out:
	if (rrpriv->rx_ring)
		pci_free_consistent(pdev, RX_TOTAL_SIZE, rrpriv->rx_ring,
				    rrpriv->rx_ring_dma);
	if (rrpriv->tx_ring)
		pci_free_consistent(pdev, TX_TOTAL_SIZE, rrpriv->tx_ring,
				    rrpriv->tx_ring_dma);
	if (rrpriv->regs)
		iounmap(rrpriv->regs);
	if (pdev) {
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	}
 out2:
	free_netdev(dev);
 out3:
	return ret;
}

static void __devexit rr_remove_one (struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);

	if (dev) {
		struct rr_private *rr = netdev_priv(dev);

		if (!(readl(&rr->regs->HostCtrl) & NIC_HALTED)){
			printk(KERN_ERR "%s: trying to unload running NIC\n",
			       dev->name);
			writel(HALT_NIC, &rr->regs->HostCtrl);
		}

		pci_free_consistent(pdev, EVT_RING_SIZE, rr->evt_ring,
				    rr->evt_ring_dma);
		pci_free_consistent(pdev, RX_TOTAL_SIZE, rr->rx_ring,
				    rr->rx_ring_dma);
		pci_free_consistent(pdev, TX_TOTAL_SIZE, rr->tx_ring,
				    rr->tx_ring_dma);
		unregister_netdev(dev);
		iounmap(rr->regs);
		free_netdev(dev);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}


/*
 * Commands are considered to be slow, thus there is no reason to
 * inline this.
 */
static void rr_issue_cmd(struct rr_private *rrpriv, struct cmd *cmd)
{
	struct rr_regs __iomem *regs;
	u32 idx;

	regs = rrpriv->regs;
	/*
	 * This is temporary - it will go away in the final version.
	 * We probably also want to make this function inline.
	 */
	if (readl(&regs->HostCtrl) & NIC_HALTED){
		printk("issuing command for halted NIC, code 0x%x, "
		       "HostCtrl %08x\n", cmd->code, readl(&regs->HostCtrl));
		if (readl(&regs->Mode) & FATAL_ERR)
			printk("error codes Fail1 %02x, Fail2 %02x\n",
			       readl(&regs->Fail1), readl(&regs->Fail2));
	}

	idx = rrpriv->info->cmd_ctrl.pi;

	writel(*(u32*)(cmd), &regs->CmdRing[idx]);
	wmb();

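	/*
	 * The command ring producer index runs downward; the unsigned
	 * subtraction together with the modulo wraps it back to the top
	 * entry (this assumes CMD_RING_ENTRIES is a power of two).
	 */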
	idx = (idx - 1) % CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.pi = idx;
	wmb();

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error code %02x\n", readl(&regs->Fail1));
}


/*
 * Reset the board in a sensible manner. The NIC is already halted
 * when we get here and a spin-lock is held.
 */
static int rr_reset(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 start_pc;
	int i;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rr_load_firmware(dev);

	writel(0x01000000, &regs->TX_state);
	writel(0xff800000, &regs->RX_state);
	writel(0, &regs->AssistState);
	writel(CLEAR_INTA, &regs->LocalCtrl);
	writel(0x01, &regs->BrkPt);
	writel(0, &regs->Timer);
	writel(0, &regs->TimerRef);
	writel(RESET_DMA, &regs->DmaReadState);
	writel(RESET_DMA, &regs->DmaWriteState);
	writel(0, &regs->DmaWriteHostHi);
	writel(0, &regs->DmaWriteHostLo);
	writel(0, &regs->DmaReadHostHi);
	writel(0, &regs->DmaReadHostLo);
	writel(0, &regs->DmaReadLen);
	writel(0, &regs->DmaWriteLen);
	writel(0, &regs->DmaWriteLcl);
	writel(0, &regs->DmaWriteIPchecksum);
	writel(0, &regs->DmaReadLcl);
	writel(0, &regs->DmaReadIPchecksum);
	writel(0, &regs->PciState);
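	/*
	 * Select the DMA pointer width and swap mode to match the host:
	 * 64-bit little-endian, 64-bit big-endian, or 32-bit.
	 */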
#if (BITS_PER_LONG == 64) && defined __LITTLE_ENDIAN
	writel(SWAP_DATA | PTR64BIT | PTR_WD_SWAP, &regs->Mode);
#elif (BITS_PER_LONG == 64)
	writel(SWAP_DATA | PTR64BIT | PTR_WD_NOSWAP, &regs->Mode);
#else
	writel(SWAP_DATA | PTR32BIT | PTR_WD_NOSWAP, &regs->Mode);
#endif

#if 0
	/*
	 * Don't worry, this is just black magic.
	 */
	writel(0xdf000, &regs->RxBase);
	writel(0xdf000, &regs->RxPrd);
	writel(0xdf000, &regs->RxCon);
	writel(0xce000, &regs->TxBase);
	writel(0xce000, &regs->TxPrd);
	writel(0xce000, &regs->TxCon);
	writel(0, &regs->RxIndPro);
	writel(0, &regs->RxIndCon);
	writel(0, &regs->RxIndRef);
	writel(0, &regs->TxIndPro);
	writel(0, &regs->TxIndCon);
	writel(0, &regs->TxIndRef);
	writel(0xcc000, &regs->pad10[0]);
	writel(0, &regs->DrCmndPro);
	writel(0, &regs->DrCmndCon);
	writel(0, &regs->DwCmndPro);
	writel(0, &regs->DwCmndCon);
	writel(0, &regs->DwCmndRef);
	writel(0, &regs->DrDataPro);
	writel(0, &regs->DrDataCon);
	writel(0, &regs->DrDataRef);
	writel(0, &regs->DwDataPro);
	writel(0, &regs->DwDataCon);
	writel(0, &regs->DwDataRef);
#endif

	writel(0xffffffff, &regs->MbEvent);
	writel(0, &regs->Event);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);

	rrpriv->info->evt_ctrl.pi = 0;

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);

	/*
	 * Why 32 ? is this not cache line size dependent?
	 */
	writel(RBURST_64|WBURST_64, &regs->PciState);
	wmb();

	start_pc = rr_read_eeprom_word(rrpriv,
			offsetof(struct eeprom, rncd_info.FwStart));

#if (DEBUG > 1)
	printk("%s: Executing firmware at address 0x%06x\n",
	       dev->name, start_pc);
#endif

	writel(start_pc + 0x800, &regs->Pc);
	wmb();
	udelay(5);

	writel(start_pc, &regs->Pc);
	wmb();

	return 0;
}


/*
 * Read a string from the EEPROM.
 */
static unsigned int rr_read_eeprom(struct rr_private *rrpriv,
				   unsigned long offset,
				   unsigned char *buf,
				   unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, host, i;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);
	host = readl(&regs->HostCtrl);
	writel(host | HALT_NIC, &regs->HostCtrl);
	mb();

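	/*
	 * Each EEPROM byte is exposed through the register window at an
	 * 8-byte stride from EEPROM_BASE, with the value returned in the
	 * top byte of WinData.
	 */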
	for (i = 0; i < length; i++){
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
		mb();
		buf[i] = (readl(&regs->WinData) >> 24) & 0xff;
		mb();
	}

	writel(host, &regs->HostCtrl);
	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
	mb();
	return i;
}


/*
 * Shortcut to read one word (4 bytes) out of the EEPROM and convert
 * it to our CPU byte-order.
 */
static u32 rr_read_eeprom_word(struct rr_private *rrpriv,
			       size_t offset)
{
	__be32 word;

	if ((rr_read_eeprom(rrpriv, offset,
			    (unsigned char *)&word, 4) == 4))
		return be32_to_cpu(word);
	return 0;
}


/*
 * Write a string to the EEPROM.
 *
 * This is only called when the firmware is not running.
 */
static unsigned int write_eeprom(struct rr_private *rrpriv,
				 unsigned long offset,
				 unsigned char *buf,
				 unsigned long length)
{
	struct rr_regs __iomem *regs = rrpriv->regs;
	u32 misc, io, data, i, j, ready, error = 0;

	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	misc = readl(&regs->LocalCtrl);
	writel(ENABLE_EEPROM_WRITE, &regs->LocalCtrl);
	mb();

	for (i = 0; i < length; i++){
		writel((EEPROM_BASE + ((offset+i) << 3)), &regs->WinBase);
		mb();
		data = buf[i] << 24;
		/*
		 * Only try to write the data if it is not the same
		 * value already.
		 */
		if ((readl(&regs->WinData) & 0xff000000) != data){
			writel(data, &regs->WinData);
			ready = 0;
			j = 0;
			mb();
			while(!ready){
				udelay(20);
				if ((readl(&regs->WinData) & 0xff000000) ==
				    data)
					ready = 1;
				mb();
				if (j++ > 5000){
					printk("data mismatch: %08x, "
					       "WinData %08x\n", data,
					       readl(&regs->WinData));
					ready = 1;
					error = 1;
				}
			}
		}
	}

	writel(misc, &regs->LocalCtrl);
	writel(io, &regs->ExtIo);
	mb();

	return error;
}


static int __devinit rr_init(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 sram_size, rev;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	rev = readl(&regs->FwRev);
	rrpriv->fw_rev = rev;
	if (rev > 0x00020024)
		printk(" Firmware revision: %i.%i.%i\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	else if (rev >= 0x00020000) {
		printk(" Firmware revision: %i.%i.%i (2.0.37 or "
		       "later is recommended)\n", (rev >> 16),
		       ((rev >> 8) & 0xff), (rev & 0xff));
	}else{
		printk(" Firmware revision too old: %i.%i.%i, please "
		       "upgrade to 2.0.37 or later.\n",
		       (rev >> 16), ((rev >> 8) & 0xff), (rev & 0xff));
	}

#if (DEBUG > 2)
	printk(" Maximum receive rings %i\n", readl(&regs->MaxRxRng));
#endif

	/*
	 * Read the hardware address from the eeprom. The HW address
	 * is not really necessary for HIPPI but awfully convenient.
	 * The pointer arithmetic to put it in dev_addr is ugly, but
	 * Donald Becker does it this way for the GigE version of this
	 * card and it's shorter and more portable than any
	 * other method I've seen. -VAL
	 */

	*(__be16 *)(dev->dev_addr) =
	  htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
	*(__be32 *)(dev->dev_addr+2) =
	  htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));

	printk(" MAC: %pM\n", dev->dev_addr);

	sram_size = rr_read_eeprom_word(rrpriv, 8);
	printk(" SRAM size 0x%06x\n", sram_size);

	return 0;
}


static int rr_init1(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	unsigned long myjif, flags;
	struct cmd cmd;
	u32 hostctrl;
	int ecode = 0;
	short i;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	spin_lock_irqsave(&rrpriv->lock, flags);

	hostctrl = readl(&regs->HostCtrl);
	writel(hostctrl | HALT_NIC | RR_CLEAR_INT, &regs->HostCtrl);
	wmb();

	if (hostctrl & PARITY_ERR){
		printk("%s: Parity error halting NIC - this is serious!\n",
		       dev->name);
		spin_unlock_irqrestore(&rrpriv->lock, flags);
		ecode = -EFAULT;
		goto error;
	}

	set_rxaddr(regs, rrpriv->rx_ctrl_dma);
	set_infoaddr(regs, rrpriv->info_dma);

	rrpriv->info->evt_ctrl.entry_size = sizeof(struct event);
	rrpriv->info->evt_ctrl.entries = EVT_RING_ENTRIES;
	rrpriv->info->evt_ctrl.mode = 0;
	rrpriv->info->evt_ctrl.pi = 0;
	set_rraddr(&rrpriv->info->evt_ctrl.rngptr, rrpriv->evt_ring_dma);

	rrpriv->info->cmd_ctrl.entry_size = sizeof(struct cmd);
	rrpriv->info->cmd_ctrl.entries = CMD_RING_ENTRIES;
	rrpriv->info->cmd_ctrl.mode = 0;
	rrpriv->info->cmd_ctrl.pi = 15;

	for (i = 0; i < CMD_RING_ENTRIES; i++) {
		writel(0, &regs->CmdRing[i]);
	}

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		rrpriv->tx_ring[i].size = 0;
		set_rraddr(&rrpriv->tx_ring[i].addr, 0);
		rrpriv->tx_skbuff[i] = NULL;
	}
	rrpriv->info->tx_ctrl.entry_size = sizeof(struct tx_desc);
	rrpriv->info->tx_ctrl.entries = TX_RING_ENTRIES;
	rrpriv->info->tx_ctrl.mode = 0;
	rrpriv->info->tx_ctrl.pi = 0;
	set_rraddr(&rrpriv->info->tx_ctrl.rngptr, rrpriv->tx_ring_dma);

	/*
	 * Set dirty_tx before we start receiving interrupts, otherwise
	 * the interrupt handler might think it is supposed to process
	 * tx ints before we are up and running, which may cause a null
	 * pointer access in the int handler.
	 */
	rrpriv->tx_full = 0;
	rrpriv->cur_rx = 0;
	rrpriv->dirty_rx = rrpriv->dirty_tx = 0;

	rr_reset(dev);

	/* Tuning values */
	writel(0x5000, &regs->ConRetry);
	writel(0x100, &regs->ConRetryTmr);
	writel(0x500000, &regs->ConTmout);
	writel(0x60, &regs->IntrTmr);
	writel(0x500000, &regs->TxDataMvTimeout);
	writel(0x200000, &regs->RxDataMvTimeout);
	writel(0x80, &regs->WriteDmaThresh);
	writel(0x80, &regs->ReadDmaThresh);

	rrpriv->fw_running = 0;
	wmb();

	hostctrl &= ~(HALT_NIC | INVALID_INST_B | PARITY_ERR);
	writel(hostctrl, &regs->HostCtrl);
	wmb();

	spin_unlock_irqrestore(&rrpriv->lock, flags);

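	/*
	 * Fill the receive ring with skbs sized for a full HIPPI MTU plus
	 * header, mapping each buffer for DMA up front.
	 */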
	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb;
		dma_addr_t addr;

		rrpriv->rx_ring[i].mode = 0;
		skb = alloc_skb(dev->mtu + HIPPI_HLEN, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "%s: Unable to allocate memory "
			       "for receive ring - halting NIC\n", dev->name);
			ecode = -ENOMEM;
			goto error;
		}
		rrpriv->rx_skbuff[i] = skb;
		addr = pci_map_single(rrpriv->pci_dev, skb->data,
			dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
		/*
		 * Sanity test to see if we conflict with the DMA
		 * limitations of the Roadrunner.
		 */
		if ((((unsigned long)skb->data) & 0xfff) > ~65320)
			printk("skb alloc error\n");

		set_rraddr(&rrpriv->rx_ring[i].addr, addr);
		rrpriv->rx_ring[i].size = dev->mtu + HIPPI_HLEN;
	}

	rrpriv->rx_ctrl[4].entry_size = sizeof(struct rx_desc);
	rrpriv->rx_ctrl[4].entries = RX_RING_ENTRIES;
	rrpriv->rx_ctrl[4].mode = 8;
	rrpriv->rx_ctrl[4].pi = 0;
	wmb();
	set_rraddr(&rrpriv->rx_ctrl[4].rngptr, rrpriv->rx_ring_dma);

	udelay(1000);

	/*
	 * Now start the FirmWare.
	 */
	cmd.code = C_START_FW;
	cmd.ring = 0;
	cmd.index = 0;

	rr_issue_cmd(rrpriv, &cmd);

	/*
	 * Give the FirmWare time to chew on the `get running' command.
	 */
	myjif = jiffies + 5 * HZ;
	while (time_before(jiffies, myjif) && !rrpriv->fw_running)
		cpu_relax();

	netif_start_queue(dev);

	return ecode;

 error:
	/*
	 * We might have gotten here because we are out of memory,
	 * make sure we release everything we allocated before failing
	 */
	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->rx_skbuff[i];

		if (skb) {
			pci_unmap_single(rrpriv->pci_dev,
					 rrpriv->rx_ring[i].addr.addrlo,
					 dev->mtu + HIPPI_HLEN,
					 PCI_DMA_FROMDEVICE);
			rrpriv->rx_ring[i].size = 0;
			set_rraddr(&rrpriv->rx_ring[i].addr, 0);
			dev_kfree_skb(skb);
			rrpriv->rx_skbuff[i] = NULL;
		}
	}
	return ecode;
}


/*
 * All events are considered to be slow (RX/TX ints do not generate
 * events) and are handled here, outside the main interrupt handler,
 * to reduce the size of the handler.
 */
static u32 rr_handle_event(struct net_device *dev, u32 prodidx, u32 eidx)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 tmp;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	while (prodidx != eidx){
		switch (rrpriv->evt_ring[eidx].code){
		case E_NIC_UP:
			tmp = readl(&regs->FwRev);
			printk(KERN_INFO "%s: Firmware revision %i.%i.%i "
			       "up and running\n", dev->name,
			       (tmp >> 16), ((tmp >> 8) & 0xff), (tmp & 0xff));
			rrpriv->fw_running = 1;
			writel(RX_RING_ENTRIES - 1, &regs->IpRxPi);
			wmb();
			break;
		case E_LINK_ON:
			printk(KERN_INFO "%s: Optical link ON\n", dev->name);
			break;
		case E_LINK_OFF:
			printk(KERN_INFO "%s: Optical link OFF\n", dev->name);
			break;
		case E_RX_IDLE:
			printk(KERN_WARNING "%s: RX data not moving\n",
			       dev->name);
			goto drop;
		case E_WATCHDOG:
			printk(KERN_INFO "%s: The watchdog is here to see "
			       "us\n", dev->name);
			break;
		case E_INTERN_ERR:
			printk(KERN_ERR "%s: HIPPI Internal NIC error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_HOST_ERR:
			printk(KERN_ERR "%s: Host software error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		/*
		 * TX events.
		 */
		case E_CON_REJ:
			printk(KERN_WARNING "%s: Connection rejected\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			break;
		case E_CON_TMOUT:
			printk(KERN_WARNING "%s: Connection timeout\n",
			       dev->name);
			break;
		case E_DISC_ERR:
			printk(KERN_WARNING "%s: HIPPI disconnect error\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			break;
		case E_INT_PRTY:
			printk(KERN_ERR "%s: HIPPI Internal Parity error\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_IDLE:
			printk(KERN_WARNING "%s: Transmitter idle\n",
			       dev->name);
			break;
		case E_TX_LINK_DROP:
			printk(KERN_WARNING "%s: Link lost during transmit\n",
			       dev->name);
			dev->stats.tx_aborted_errors++;
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_RNG:
			printk(KERN_ERR "%s: Invalid send ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_BUF:
			printk(KERN_ERR "%s: Invalid send buffer address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_TX_INV_DSC:
			printk(KERN_ERR "%s: Invalid descriptor address\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		/*
		 * RX events.
		 */
		case E_RX_RNG_OUT:
			printk(KERN_INFO "%s: Receive ring full\n", dev->name);
			break;

		case E_RX_PAR_ERR:
			printk(KERN_WARNING "%s: Receive parity error\n",
			       dev->name);
			goto drop;
		case E_RX_LLRC_ERR:
			printk(KERN_WARNING "%s: Receive LLRC error\n",
			       dev->name);
			goto drop;
		case E_PKT_LN_ERR:
			printk(KERN_WARNING "%s: Receive packet length "
			       "error\n", dev->name);
			goto drop;
		case E_DTA_CKSM_ERR:
			printk(KERN_WARNING "%s: Data checksum error\n",
			       dev->name);
			goto drop;
		case E_SHT_BST:
			printk(KERN_WARNING "%s: Unexpected short burst "
			       "error\n", dev->name);
			goto drop;
		case E_STATE_ERR:
			printk(KERN_WARNING "%s: Recv. state transition"
			       " error\n", dev->name);
			goto drop;
		case E_UNEXP_DATA:
			printk(KERN_WARNING "%s: Unexpected data error\n",
			       dev->name);
			goto drop;
		case E_LST_LNK_ERR:
			printk(KERN_WARNING "%s: Link lost error\n",
			       dev->name);
			goto drop;
		case E_FRM_ERR:
			printk(KERN_WARNING "%s: Framing error\n",
			       dev->name);
			goto drop;
		case E_FLG_SYN_ERR:
			printk(KERN_WARNING "%s: Flag sync. lost during "
			       "packet\n", dev->name);
			goto drop;
		case E_RX_INV_BUF:
			printk(KERN_ERR "%s: Invalid receive buffer "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_RX_INV_DSC:
			printk(KERN_ERR "%s: Invalid receive descriptor "
			       "address\n", dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		case E_RNG_BLK:
			printk(KERN_ERR "%s: Invalid ring block\n",
			       dev->name);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			wmb();
			break;
		drop:
			/* Label packet to be dropped.
			 * Actual dropping occurs in rx
			 * handling.
			 *
			 * The index of packet we get to drop is
			 * the index of the packet following
			 * the bad packet. -kbf
			 */
			{
				u16 index = rrpriv->evt_ring[eidx].index;
				index = (index + (RX_RING_ENTRIES - 1)) %
					RX_RING_ENTRIES;
				rrpriv->rx_ring[index].mode |=
					(PACKET_BAD | PACKET_END);
			}
			break;
		default:
			printk(KERN_WARNING "%s: Unhandled event 0x%02x\n",
			       dev->name, rrpriv->evt_ring[eidx].code);
		}
		eidx = (eidx + 1) % EVT_RING_ENTRIES;
	}

	rrpriv->info->evt_ctrl.pi = eidx;
	wmb();
	return eidx;
}


static void rx_int(struct net_device *dev, u32 rxlimit, u32 index)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;

	do {
		struct rx_desc *desc;
		u32 pkt_len;

		desc = &(rrpriv->rx_ring[index]);
		pkt_len = desc->size;
#if (DEBUG > 2)
		printk("index %i, rxlimit %i\n", index, rxlimit);
		printk("len %x, mode %x\n", pkt_len, desc->mode);
#endif
		if ( (rrpriv->rx_ring[index].mode & PACKET_BAD) == PACKET_BAD){
			dev->stats.rx_dropped++;
			goto defer;
		}

		if (pkt_len > 0){
			struct sk_buff *skb, *rx_skb;

			rx_skb = rrpriv->rx_skbuff[index];

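			/*
			 * Small packets are copied into a fresh skb so the
			 * mapped ring buffer can be reused; larger packets
			 * are passed up directly and the ring slot gets a
			 * newly allocated, newly mapped replacement buffer.
			 */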
			if (pkt_len < PKT_COPY_THRESHOLD) {
				skb = alloc_skb(pkt_len, GFP_ATOMIC);
				if (skb == NULL){
					printk(KERN_WARNING "%s: Unable to allocate skb (%i bytes), deferring packet\n", dev->name, pkt_len);
					dev->stats.rx_dropped++;
					goto defer;
				} else {
					pci_dma_sync_single_for_cpu(rrpriv->pci_dev,
								    desc->addr.addrlo,
								    pkt_len,
								    PCI_DMA_FROMDEVICE);

					memcpy(skb_put(skb, pkt_len),
					       rx_skb->data, pkt_len);

					pci_dma_sync_single_for_device(rrpriv->pci_dev,
								       desc->addr.addrlo,
								       pkt_len,
								       PCI_DMA_FROMDEVICE);
				}
			}else{
				struct sk_buff *newskb;

				newskb = alloc_skb(dev->mtu + HIPPI_HLEN,
					GFP_ATOMIC);
				if (newskb){
					dma_addr_t addr;

					pci_unmap_single(rrpriv->pci_dev,
						desc->addr.addrlo, dev->mtu +
						HIPPI_HLEN, PCI_DMA_FROMDEVICE);
					skb = rx_skb;
					skb_put(skb, pkt_len);
					rrpriv->rx_skbuff[index] = newskb;
					addr = pci_map_single(rrpriv->pci_dev,
						newskb->data,
						dev->mtu + HIPPI_HLEN,
						PCI_DMA_FROMDEVICE);
					set_rraddr(&desc->addr, addr);
				} else {
					printk("%s: Out of memory, deferring "
					       "packet\n", dev->name);
					dev->stats.rx_dropped++;
					goto defer;
				}
			}
			skb->protocol = hippi_type_trans(skb, dev);

			netif_rx(skb);		/* send it up */

			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
	defer:
		desc->mode = 0;
		desc->size = dev->mtu + HIPPI_HLEN;

		if ((index & 7) == 7)
			writel(index, &regs->IpRxPi);

		index = (index + 1) % RX_RING_ENTRIES;
	} while(index != rxlimit);

	rrpriv->cur_rx = index;
	wmb();
}


static irqreturn_t rr_interrupt(int irq, void *dev_id)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	struct net_device *dev = (struct net_device *)dev_id;
	u32 prodidx, rxindex, eidx, txcsmr, rxlimit, txcon;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (!(readl(&regs->HostCtrl) & RR_INT))
		return IRQ_NONE;

	spin_lock(&rrpriv->lock);

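	/*
	 * EvtPrd packs three ring indices into one register: the event
	 * producer in bits 7:0, the TX consumer in bits 15:8 and the RX
	 * producer limit in bits 23:16.
	 */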
	prodidx = readl(&regs->EvtPrd);
	txcsmr = (prodidx >> 8) & 0xff;
	rxlimit = (prodidx >> 16) & 0xff;
	prodidx &= 0xff;

#if (DEBUG > 2)
	printk("%s: interrupt, prodidx = %i, eidx = %i\n", dev->name,
	       prodidx, rrpriv->info->evt_ctrl.pi);
#endif
	/*
	 * Order here is important. We must handle events
	 * before doing anything else in order to catch
	 * such things as LLRC errors, etc -kbf
	 */

	eidx = rrpriv->info->evt_ctrl.pi;
	if (prodidx != eidx)
		eidx = rr_handle_event(dev, prodidx, eidx);

	rxindex = rrpriv->cur_rx;
	if (rxindex != rxlimit)
		rx_int(dev, rxlimit, rxindex);

	txcon = rrpriv->dirty_tx;
	if (txcsmr != txcon) {
		do {
			/* Due to occasional firmware TX producer/consumer
			 * out-of-sync errors we need to check the entry
			 * in the ring -kbf
			 */
			if(rrpriv->tx_skbuff[txcon]){
				struct tx_desc *desc;
				struct sk_buff *skb;

				desc = &(rrpriv->tx_ring[txcon]);
				skb = rrpriv->tx_skbuff[txcon];

				dev->stats.tx_packets++;
				dev->stats.tx_bytes += skb->len;

				pci_unmap_single(rrpriv->pci_dev,
						 desc->addr.addrlo, skb->len,
						 PCI_DMA_TODEVICE);
				dev_kfree_skb_irq(skb);

				rrpriv->tx_skbuff[txcon] = NULL;
				desc->size = 0;
				set_rraddr(&rrpriv->tx_ring[txcon].addr, 0);
				desc->mode = 0;
			}
			txcon = (txcon + 1) % TX_RING_ENTRIES;
		} while (txcsmr != txcon);
		wmb();

		rrpriv->dirty_tx = txcon;
		if (rrpriv->tx_full && rr_if_busy(dev) &&
		    (((rrpriv->info->tx_ctrl.pi + 1) % TX_RING_ENTRIES)
		     != rrpriv->dirty_tx)){
			rrpriv->tx_full = 0;
			netif_wake_queue(dev);
		}
	}

	eidx |= ((txcsmr << 8) | (rxlimit << 16));
	writel(eidx, &regs->EvtCon);
	wmb();

	spin_unlock(&rrpriv->lock);
	return IRQ_HANDLED;
}

static inline void rr_raz_tx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int i;

	for (i = 0; i < TX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->tx_skbuff[i];

		if (skb) {
			struct tx_desc *desc = &(rrpriv->tx_ring[i]);

			pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
					 skb->len, PCI_DMA_TODEVICE);
			desc->size = 0;
			set_rraddr(&desc->addr, 0);
			dev_kfree_skb(skb);
			rrpriv->tx_skbuff[i] = NULL;
		}
	}
}


static inline void rr_raz_rx(struct rr_private *rrpriv,
			     struct net_device *dev)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		struct sk_buff *skb = rrpriv->rx_skbuff[i];

		if (skb) {
			struct rx_desc *desc = &(rrpriv->rx_ring[i]);

			pci_unmap_single(rrpriv->pci_dev, desc->addr.addrlo,
					 dev->mtu + HIPPI_HLEN, PCI_DMA_FROMDEVICE);
			desc->size = 0;
			set_rraddr(&desc->addr, 0);
			dev_kfree_skb(skb);
			rrpriv->rx_skbuff[i] = NULL;
		}
	}
}

static void rr_timer(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	unsigned long flags;

	if (readl(&regs->HostCtrl) & NIC_HALTED){
		printk("%s: Restarting nic\n", dev->name);
		memset(rrpriv->rx_ctrl, 0, 256 * sizeof(struct ring_ctrl));
		memset(rrpriv->info, 0, sizeof(struct rr_info));
		wmb();

		rr_raz_tx(rrpriv, dev);
		rr_raz_rx(rrpriv, dev);

		if (rr_init1(dev)) {
			spin_lock_irqsave(&rrpriv->lock, flags);
			writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT,
			       &regs->HostCtrl);
			spin_unlock_irqrestore(&rrpriv->lock, flags);
		}
	}
	rrpriv->timer.expires = RUN_AT(5*HZ);
	add_timer(&rrpriv->timer);
}


static int rr_open(struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct pci_dev *pdev = rrpriv->pci_dev;
	struct rr_regs __iomem *regs;
	int ecode = 0;
	unsigned long flags;
	dma_addr_t dma_addr;

	regs = rrpriv->regs;

	if (rrpriv->fw_rev < 0x00020000) {
		printk(KERN_WARNING "%s: trying to configure device with "
		       "obsolete firmware\n", dev->name);
		ecode = -EBUSY;
		goto error;
	}

	rrpriv->rx_ctrl = pci_alloc_consistent(pdev,
					       256 * sizeof(struct ring_ctrl),
					       &dma_addr);
	if (!rrpriv->rx_ctrl) {
		ecode = -ENOMEM;
		goto error;
	}
	rrpriv->rx_ctrl_dma = dma_addr;
	memset(rrpriv->rx_ctrl, 0, 256*sizeof(struct ring_ctrl));

	rrpriv->info = pci_alloc_consistent(pdev, sizeof(struct rr_info),
					    &dma_addr);
	if (!rrpriv->info) {
		ecode = -ENOMEM;
		goto error;
	}
	rrpriv->info_dma = dma_addr;
	memset(rrpriv->info, 0, sizeof(struct rr_info));
	wmb();

	spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
	readl(&regs->HostCtrl);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	if (request_irq(dev->irq, rr_interrupt, IRQF_SHARED, dev->name, dev)) {
		printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
		       dev->name, dev->irq);
		ecode = -EAGAIN;
		goto error;
	}

	if ((ecode = rr_init1(dev)))
		goto error;

	/* Set the timer to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&rrpriv->timer);
	rrpriv->timer.expires = RUN_AT(5*HZ);		/* 5 sec. watchdog */
	rrpriv->timer.data = (unsigned long)dev;
	rrpriv->timer.function = &rr_timer;		/* timer handler */
	add_timer(&rrpriv->timer);

	netif_start_queue(dev);

	return ecode;

 error:
	spin_lock_irqsave(&rrpriv->lock, flags);
	writel(readl(&regs->HostCtrl)|HALT_NIC|RR_CLEAR_INT, &regs->HostCtrl);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	if (rrpriv->info) {
		pci_free_consistent(pdev, sizeof(struct rr_info), rrpriv->info,
				    rrpriv->info_dma);
		rrpriv->info = NULL;
	}
	if (rrpriv->rx_ctrl) {
		pci_free_consistent(pdev, 256 * sizeof(struct ring_ctrl),
				    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
		rrpriv->rx_ctrl = NULL;
	}

	netif_stop_queue(dev);

	return ecode;
}


static void rr_dump(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	u32 index, cons;
	short i;
	int len;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	printk("%s: dumping NIC TX rings\n", dev->name);

	printk("RxPrd %08x, TxPrd %02x, EvtPrd %08x, TxPi %02x, TxCtrlPi %02x\n",
	       readl(&regs->RxPrd), readl(&regs->TxPrd),
	       readl(&regs->EvtPrd), readl(&regs->TxPi),
	       rrpriv->info->tx_ctrl.pi);

	printk("Error code 0x%x\n", readl(&regs->Fail1));

	index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % EVT_RING_ENTRIES;
	cons = rrpriv->dirty_tx;
	printk("TX ring index %i, TX consumer %i\n",
	       index, cons);

	if (rrpriv->tx_skbuff[index]){
		len = min_t(int, 0x80, rrpriv->tx_skbuff[index]->len);
		printk("skbuff for index %i is valid - dumping data (0x%x bytes - DMA len 0x%x)\n", index, len, rrpriv->tx_ring[index].size);
		for (i = 0; i < len; i++){
			if (!(i & 7))
				printk("\n");
			printk("%02x ", (unsigned char) rrpriv->tx_skbuff[index]->data[i]);
		}
		printk("\n");
	}

	if (rrpriv->tx_skbuff[cons]){
		len = min_t(int, 0x80, rrpriv->tx_skbuff[cons]->len);
		printk("skbuff for cons %i is valid - dumping data (0x%x bytes - skbuff len 0x%x)\n", cons, len, rrpriv->tx_skbuff[cons]->len);
		printk("mode 0x%x, size 0x%x,\n phys %08Lx, skbuff-addr %08lx, truesize 0x%x\n",
		       rrpriv->tx_ring[cons].mode,
		       rrpriv->tx_ring[cons].size,
		       (unsigned long long) rrpriv->tx_ring[cons].addr.addrlo,
		       (unsigned long)rrpriv->tx_skbuff[cons]->data,
		       (unsigned int)rrpriv->tx_skbuff[cons]->truesize);
		for (i = 0; i < len; i++){
			if (!(i & 7))
				printk("\n");
			printk("%02x ", (unsigned char)rrpriv->tx_ring[cons].size);
		}
		printk("\n");
	}

	printk("dumping TX ring info:\n");
	for (i = 0; i < TX_RING_ENTRIES; i++)
		printk("mode 0x%x, size 0x%x, phys-addr %08Lx\n",
		       rrpriv->tx_ring[i].mode,
		       rrpriv->tx_ring[i].size,
		       (unsigned long long) rrpriv->tx_ring[i].addr.addrlo);

}


static int rr_close(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	unsigned long flags;
	u32 tmp;
	short i;

	netif_stop_queue(dev);

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	/*
	 * Lock to make sure we are not cleaning up while another CPU
	 * is handling interrupts.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	tmp = readl(&regs->HostCtrl);
	if (tmp & NIC_HALTED){
		printk("%s: NIC already halted\n", dev->name);
		rr_dump(dev);
	}else{
		tmp |= HALT_NIC | RR_CLEAR_INT;
		writel(tmp, &regs->HostCtrl);
		readl(&regs->HostCtrl);
	}

	rrpriv->fw_running = 0;

	del_timer_sync(&rrpriv->timer);

	writel(0, &regs->TxPi);
	writel(0, &regs->IpRxPi);

	writel(0, &regs->EvtCon);
	writel(0, &regs->EvtPrd);

	for (i = 0; i < CMD_RING_ENTRIES; i++)
		writel(0, &regs->CmdRing[i]);

	rrpriv->info->tx_ctrl.entries = 0;
	rrpriv->info->cmd_ctrl.pi = 0;
	rrpriv->info->evt_ctrl.pi = 0;
	rrpriv->rx_ctrl[4].entries = 0;

	rr_raz_tx(rrpriv, dev);
	rr_raz_rx(rrpriv, dev);

	pci_free_consistent(rrpriv->pci_dev, 256 * sizeof(struct ring_ctrl),
			    rrpriv->rx_ctrl, rrpriv->rx_ctrl_dma);
	rrpriv->rx_ctrl = NULL;

	pci_free_consistent(rrpriv->pci_dev, sizeof(struct rr_info),
			    rrpriv->info, rrpriv->info_dma);
	rrpriv->info = NULL;

	free_irq(dev->irq, dev);
	spin_unlock_irqrestore(&rrpriv->lock, flags);

	return 0;
}


static int rr_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rr_private *rrpriv = netdev_priv(dev);
	struct rr_regs __iomem *regs = rrpriv->regs;
	struct hippi_cb *hcb = (struct hippi_cb *) skb->cb;
	struct ring_ctrl *txctrl;
	unsigned long flags;
	u32 index, len = skb->len;
	u32 *ifield;
	struct sk_buff *new_skb;

	if (readl(&regs->Mode) & FATAL_ERR)
		printk("error codes Fail1 %02x, Fail2 %02x\n",
		       readl(&regs->Fail1), readl(&regs->Fail2));

	/*
	 * We probably need to deal with tbusy here to prevent overruns.
	 */

	if (skb_headroom(skb) < 8){
		printk("incoming skb too small - reallocating\n");
		if (!(new_skb = dev_alloc_skb(len + 8))) {
			dev_kfree_skb(skb);
			netif_wake_queue(dev);
			return -EBUSY;
		}
		skb_reserve(new_skb, 8);
		skb_put(new_skb, len);
		skb_copy_from_linear_data(skb, new_skb->data, len);
		dev_kfree_skb(skb);
		skb = new_skb;
	}

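	/*
	 * Prepend the two-word HIPPI ifield in front of the packet data;
	 * the extra 8 bytes are included in the DMA mapping below
	 * (note the len + 8).
	 */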
	ifield = (u32 *)skb_push(skb, 8);

	ifield[0] = 0;
	ifield[1] = hcb->ifield;

	/*
	 * We don't need the lock before we are actually going to start
	 * fiddling with the control blocks.
	 */
	spin_lock_irqsave(&rrpriv->lock, flags);

	txctrl = &rrpriv->info->tx_ctrl;

	index = txctrl->pi;

	rrpriv->tx_skbuff[index] = skb;
	set_rraddr(&rrpriv->tx_ring[index].addr, pci_map_single(
		rrpriv->pci_dev, skb->data, len + 8, PCI_DMA_TODEVICE));
	rrpriv->tx_ring[index].size = len + 8; /* include IFIELD */
	rrpriv->tx_ring[index].mode = PACKET_START | PACKET_END;
	txctrl->pi = (index + 1) % TX_RING_ENTRIES;
	wmb();
	writel(txctrl->pi, &regs->TxPi);

	if (txctrl->pi == rrpriv->dirty_tx){
		rrpriv->tx_full = 1;
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&rrpriv->lock, flags);

	dev->trans_start = jiffies;
	return 0;
}


/*
 * Read the firmware out of the EEPROM and put it into the SRAM
 * (or from user space - later).
 *
 * This operation requires the NIC to be halted and is performed with
 * interrupts disabled and with the spinlock held.
 */
static int rr_load_firmware(struct net_device *dev)
{
	struct rr_private *rrpriv;
	struct rr_regs __iomem *regs;
	size_t eptr, segptr;
	int i, j;
	u32 localctrl, sptr, len, tmp;
	u32 p2len, p2size, nr_seg, revision, io, sram_size;

	rrpriv = netdev_priv(dev);
	regs = rrpriv->regs;

	if (dev->flags & IFF_UP)
		return -EBUSY;

	if (!(readl(&regs->HostCtrl) & NIC_HALTED)){
		printk("%s: Trying to load firmware to a running NIC.\n",
		       dev->name);
		return -EBUSY;
	}

	localctrl = readl(&regs->LocalCtrl);
	writel(0, &regs->LocalCtrl);

	writel(0, &regs->EvtPrd);
	writel(0, &regs->RxPrd);
	writel(0, &regs->TxPrd);

	/*
	 * First wipe the entire SRAM, otherwise we might run into all
	 * kinds of trouble ... sigh, this took almost all afternoon
	 * to track down ;-(
	 */
	io = readl(&regs->ExtIo);
	writel(0, &regs->ExtIo);
	sram_size = rr_read_eeprom_word(rrpriv, 8);

	for (i = 200; i < sram_size / 4; i++){
		writel(i * 4, &regs->WinBase);
		mb();
		writel(0, &regs->WinData);
		mb();
	}
	writel(io, &regs->ExtIo);
	mb();

	eptr = rr_read_eeprom_word(rrpriv,
		       offsetof(struct eeprom, rncd_info.AddrRunCodeSegs));
	eptr = ((eptr & 0x1fffff) >> 3);

	p2len = rr_read_eeprom_word(rrpriv, 0x83*4);
	p2len = (p2len << 2);
	p2size = rr_read_eeprom_word(rrpriv, 0x84*4);
	p2size = ((p2size & 0x1fffff) >> 3);

	if ((eptr < p2size) || (eptr > (p2size + p2len))){
		printk("%s: eptr is invalid\n", dev->name);
		goto out;
	}

	revision = rr_read_eeprom_word(rrpriv,
			offsetof(struct eeprom, manf.HeaderFmt));

	if (revision != 1){
		printk("%s: invalid firmware format (%i)\n",
		       dev->name, revision);
		goto out;
	}

	nr_seg = rr_read_eeprom_word(rrpriv, eptr);
	eptr += 4;
#if (DEBUG > 1)
	printk("%s: nr_seg %i\n", dev->name, nr_seg);
#endif

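	/*
	 * Each run-code segment record in the EEPROM gives the SRAM
	 * destination address, the segment length in words and a pointer
	 * to the segment data, which is then copied word by word through
	 * the register window.
	 */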
	for (i = 0; i < nr_seg; i++){
		sptr = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		len = rr_read_eeprom_word(rrpriv, eptr);
		eptr += 4;
		segptr = rr_read_eeprom_word(rrpriv, eptr);
		segptr = ((segptr & 0x1fffff) >> 3);
		eptr += 4;
#if (DEBUG > 1)
		printk("%s: segment %i, sram address %06x, length %04x, segptr %06x\n",
		       dev->name, i, sptr, len, segptr);
#endif
		for (j = 0; j < len; j++){
			tmp = rr_read_eeprom_word(rrpriv, segptr);
			writel(sptr, &regs->WinBase);
			mb();
			writel(tmp, &regs->WinData);
			mb();
			segptr += 4;
			sptr += 4;
		}
	}

 out:
	writel(localctrl, &regs->LocalCtrl);
	mb();
	return 0;
}


static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct rr_private *rrpriv;
	unsigned char *image, *oldimage;
	unsigned long flags;
	unsigned int i;
	int error = -EOPNOTSUPP;

	rrpriv = netdev_priv(dev);

	switch(cmd){
	case SIOCRRGFW:
		if (!capable(CAP_SYS_RAWIO)){
			return -EPERM;
		}

		image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
		if (!image){
			printk(KERN_ERR "%s: Unable to allocate memory "
			       "for EEPROM image\n", dev->name);
			return -ENOMEM;
		}


		if (rrpriv->fw_running){
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto gf_out;
		}

		spin_lock_irqsave(&rrpriv->lock, flags);
		i = rr_read_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);
		if (i != EEPROM_BYTES){
			printk(KERN_ERR "%s: Error reading EEPROM\n",
			       dev->name);
			error = -EFAULT;
			goto gf_out;
		}
		error = copy_to_user(rq->ifr_data, image, EEPROM_BYTES);
		if (error)
			error = -EFAULT;
	gf_out:
		kfree(image);
		return error;

	case SIOCRRPFW:
		if (!capable(CAP_SYS_RAWIO)){
			return -EPERM;
		}

		image = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
		oldimage = kmalloc(EEPROM_WORDS * sizeof(u32), GFP_KERNEL);
		if (!image || !oldimage) {
			printk(KERN_ERR "%s: Unable to allocate memory "
			       "for EEPROM image\n", dev->name);
			error = -ENOMEM;
			goto wf_out;
		}

		error = copy_from_user(image, rq->ifr_data, EEPROM_BYTES);
		if (error) {
			error = -EFAULT;
			goto wf_out;
		}

		if (rrpriv->fw_running){
			printk("%s: Firmware already running\n", dev->name);
			error = -EPERM;
			goto wf_out;
		}

		printk("%s: Updating EEPROM firmware\n", dev->name);

		spin_lock_irqsave(&rrpriv->lock, flags);
		error = write_eeprom(rrpriv, 0, image, EEPROM_BYTES);
		if (error)
			printk(KERN_ERR "%s: Error writing EEPROM\n",
			       dev->name);

		i = rr_read_eeprom(rrpriv, 0, oldimage, EEPROM_BYTES);
		spin_unlock_irqrestore(&rrpriv->lock, flags);

		if (i != EEPROM_BYTES)
			printk(KERN_ERR "%s: Error reading back EEPROM "
			       "image\n", dev->name);

		error = memcmp(image, oldimage, EEPROM_BYTES);
		if (error){
			printk(KERN_ERR "%s: Error verifying EEPROM image\n",
			       dev->name);
			error = -EFAULT;
		}
	wf_out:
		kfree(oldimage);
		kfree(image);
		return error;

	case SIOCRRID:
		return put_user(0x52523032, (int __user *)rq->ifr_data);
	default:
		return error;
	}
}

static struct pci_device_id rr_pci_tbl[] = {
	{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, rr_pci_tbl);

static struct pci_driver rr_driver = {
	.name		= "rrunner",
	.id_table	= rr_pci_tbl,
	.probe		= rr_init_one,
	.remove		= __devexit_p(rr_remove_one),
};

static int __init rr_init_module(void)
{
	return pci_register_driver(&rr_driver);
}

static void __exit rr_cleanup_module(void)
{
	pci_unregister_driver(&rr_driver);
}

module_init(rr_init_module);
module_exit(rr_cleanup_module);