1 | /****************************************************************************** |
2 | * | |
3 | * nicstar.c | |
4 | * | |
5 | * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. | |
6 | * | |
7 | * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME. | |
8 | * It was taken from the frle-0.22 device driver. | |
9 | * As the file doesn't have a copyright notice, in the file | |
10 | * nicstarmac.copyright I put the copyright notice from the | |
11 | * frle-0.22 device driver. | |
12 | * Some code is based on the nicstar driver by M. Welsh. | |
13 | * | |
14 | * Author: Rui Prior ([email protected]) | |
15 | * PowerPC support by Jay Talbott ([email protected]) April 1999 | |
16 | * | |
17 | * | |
18 | * (C) INESC 1999 | |
19 | * | |
20 | * | |
21 | ******************************************************************************/ | |
22 | ||
23 | ||
24 | /**** IMPORTANT INFORMATION *************************************************** | |
25 | * | |
26 | * There are currently three types of spinlocks: | |
27 | * | |
28 | * 1 - Per card interrupt spinlock (to protect structures and such) | |
29 | * 2 - Per SCQ scq spinlock | |
30 | * 3 - Per card resource spinlock (to access registers, etc.) | |
31 | * | |
32 | * These must NEVER be grabbed in reverse order. | |
33 | * | |
34 | ******************************************************************************/ | |
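/* In practice the safe nesting order is 1 -> 2 -> 3: for example, code that
   already holds an SCQ lock (2) may still take the per-card resource lock (3),
   as happens when ns_write_sram() is called from ns_close(), but it must never
   try to take the interrupt lock (1) at that point. */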
35 | ||
36 | /* Header files ***************************************************************/ | |
37 | ||
38 | #include <linux/module.h> | |
39 | #include <linux/kernel.h> |
40 | #include <linux/skbuff.h> | |
41 | #include <linux/atmdev.h> | |
42 | #include <linux/atm.h> | |
43 | #include <linux/pci.h> | |
44 | #include <linux/types.h> | |
45 | #include <linux/string.h> | |
46 | #include <linux/delay.h> | |
47 | #include <linux/init.h> | |
48 | #include <linux/sched.h> | |
49 | #include <linux/timer.h> | |
50 | #include <linux/interrupt.h> | |
51 | #include <linux/bitops.h> | |
52 | #include <linux/slab.h> |
53 | #include <asm/io.h> |
54 | #include <asm/uaccess.h> | |
55 | #include <asm/atomic.h> | |
56 | #include "nicstar.h" | |
57 | #ifdef CONFIG_ATM_NICSTAR_USE_SUNI | |
58 | #include "suni.h" | |
59 | #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ | |
60 | #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 | |
61 | #include "idt77105.h" | |
62 | #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ | |
63 | ||
64 | #if BITS_PER_LONG != 32 | |
65 | # error FIXME: this driver requires a 32-bit platform | |
66 | #endif | |
67 | ||
68 | /* Additional code ************************************************************/ | |
69 | ||
70 | #include "nicstarmac.c" | |
71 | ||
72 | ||
73 | /* Configurable parameters ****************************************************/ | |
74 | ||
75 | #undef PHY_LOOPBACK | |
76 | #undef TX_DEBUG | |
77 | #undef RX_DEBUG | |
78 | #undef GENERAL_DEBUG | |
79 | #undef EXTRA_DEBUG | |
80 | ||
81 | #undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know | |
82 | you're going to use only raw ATM */ | |
83 | ||
84 | ||
85 | /* Do not touch these *********************************************************/ | |
86 | ||
87 | #ifdef TX_DEBUG | |
88 | #define TXPRINTK(args...) printk(args) | |
89 | #else | |
90 | #define TXPRINTK(args...) | |
91 | #endif /* TX_DEBUG */ | |
92 | ||
93 | #ifdef RX_DEBUG | |
94 | #define RXPRINTK(args...) printk(args) | |
95 | #else | |
96 | #define RXPRINTK(args...) | |
97 | #endif /* RX_DEBUG */ | |
98 | ||
99 | #ifdef GENERAL_DEBUG | |
100 | #define PRINTK(args...) printk(args) | |
101 | #else | |
102 | #define PRINTK(args...) | |
103 | #endif /* GENERAL_DEBUG */ | |
104 | ||
105 | #ifdef EXTRA_DEBUG | |
106 | #define XPRINTK(args...) printk(args) | |
107 | #else | |
108 | #define XPRINTK(args...) | |
109 | #endif /* EXTRA_DEBUG */ | |
110 | ||
111 | ||
112 | /* Macros *********************************************************************/ | |
113 | ||
114 | #define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ) | |
115 | ||
116 | #define NS_DELAY mdelay(1) | |
117 | ||
118 | #define ALIGN_BUS_ADDR(addr, alignment) \ | |
119 | ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1)) | |
120 | #define ALIGN_ADDRESS(addr, alignment) \ | |
121 | bus_to_virt(ALIGN_BUS_ADDR(virt_to_bus(addr), alignment)) | |
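/* e.g. ALIGN_BUS_ADDR(0x12345, 0x1000) == 0x13000: the bus address is rounded
   up to the next 'alignment' boundary (alignment must be a power of two). */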
122 | ||
123 | #undef CEIL | |
124 | ||
125 | #ifndef ATM_SKB | |
126 | #define ATM_SKB(s) (&(s)->atm) | |
127 | #endif | |
128 | ||
129 | |
130 | /* Function declarations ******************************************************/ | |
131 | ||
132 | static u32 ns_read_sram(ns_dev *card, u32 sram_address); | |
133 | static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count); | |
134 | static int __devinit ns_init_card(int i, struct pci_dev *pcidev); | |
135 | static void __devinit ns_init_card_error(ns_dev *card, int error); | |
136 | static scq_info *get_scq(int size, u32 scd); | |
137 | static void free_scq(scq_info *scq, struct atm_vcc *vcc); | |
138 | static void push_rxbufs(ns_dev *, struct sk_buff *); |
139 | static irqreturn_t ns_irq_handler(int irq, void *dev_id); |
140 | static int ns_open(struct atm_vcc *vcc); |
141 | static void ns_close(struct atm_vcc *vcc); | |
142 | static void fill_tst(ns_dev *card, int n, vc_map *vc); | |
143 | static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb); | |
144 | static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd, | |
145 | struct sk_buff *skb); | |
146 | static void process_tsq(ns_dev *card); | |
147 | static void drain_scq(ns_dev *card, scq_info *scq, int pos); | |
148 | static void process_rsq(ns_dev *card); | |
149 | static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe); | |
150 | #ifdef NS_USE_DESTRUCTORS | |
151 | static void ns_sb_destructor(struct sk_buff *sb); | |
152 | static void ns_lb_destructor(struct sk_buff *lb); | |
153 | static void ns_hb_destructor(struct sk_buff *hb); | |
154 | #endif /* NS_USE_DESTRUCTORS */ | |
155 | static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb); | |
156 | static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count); | |
157 | static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb); | |
158 | static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb); | |
159 | static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb); | |
160 | static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page); | |
161 | static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg); | |
162 | static void which_list(ns_dev *card, struct sk_buff *skb); | |
163 | static void ns_poll(unsigned long arg); | |
164 | static int ns_parse_mac(char *mac, unsigned char *esi); | |
165 | static short ns_h2i(char c); | |
166 | static void ns_phy_put(struct atm_dev *dev, unsigned char value, | |
167 | unsigned long addr); | |
168 | static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr); | |
169 | ||
170 | ||
171 | ||
172 | /* Global variables ***********************************************************/ | |
173 | ||
174 | static struct ns_dev *cards[NS_MAX_CARDS]; | |
175 | static unsigned num_cards; | |
176 | static struct atmdev_ops atm_ops = | |
177 | { | |
178 | .open = ns_open, | |
179 | .close = ns_close, | |
180 | .ioctl = ns_ioctl, | |
181 | .send = ns_send, | |
182 | .phy_put = ns_phy_put, | |
183 | .phy_get = ns_phy_get, | |
184 | .proc_read = ns_proc_read, | |
185 | .owner = THIS_MODULE, | |
186 | }; | |
187 | static struct timer_list ns_timer; | |
188 | static char *mac[NS_MAX_CARDS]; | |
189 | module_param_array(mac, charp, NULL, 0); | |
190 | MODULE_LICENSE("GPL"); | |
191 | ||
192 | ||
193 | /* Functions*******************************************************************/ | |
194 | ||
195 | static int __devinit nicstar_init_one(struct pci_dev *pcidev, | |
196 | const struct pci_device_id *ent) | |
197 | { | |
198 | static int index = -1; | |
199 | unsigned int error; | |
200 | ||
201 | index++; | |
202 | cards[index] = NULL; | |
203 | ||
204 | error = ns_init_card(index, pcidev); | |
205 | if (error) { | |
206 | cards[index--] = NULL; /* don't increment index */ | |
207 | goto err_out; | |
208 | } | |
209 | ||
210 | return 0; | |
211 | err_out: | |
212 | return -ENODEV; | |
213 | } | |
214 | ||
215 | ||
216 | ||
217 | static void __devexit nicstar_remove_one(struct pci_dev *pcidev) | |
218 | { | |
219 | int i, j; | |
220 | ns_dev *card = pci_get_drvdata(pcidev); | |
221 | struct sk_buff *hb; | |
222 | struct sk_buff *iovb; | |
223 | struct sk_buff *lb; | |
224 | struct sk_buff *sb; | |
225 | ||
226 | i = card->index; | |
227 | ||
228 | if (cards[i] == NULL) | |
229 | return; | |
230 | ||
231 | if (card->atmdev->phy && card->atmdev->phy->stop) | |
232 | card->atmdev->phy->stop(card->atmdev); | |
233 | ||
234 | /* Stop everything */ | |
235 | writel(0x00000000, card->membase + CFG); | |
236 | ||
237 | /* De-register device */ | |
238 | atm_dev_deregister(card->atmdev); | |
239 | ||
240 | /* Disable PCI device */ | |
241 | pci_disable_device(pcidev); | |
242 | ||
243 | /* Free up resources */ | |
244 | j = 0; | |
245 | PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count); | |
246 | while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) | |
247 | { | |
248 | dev_kfree_skb_any(hb); | |
249 | j++; | |
250 | } | |
251 | PRINTK("nicstar%d: %d huge buffers freed.\n", i, j); | |
252 | j = 0; | |
253 | PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count); | |
254 | while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) | |
255 | { | |
256 | dev_kfree_skb_any(iovb); | |
257 | j++; | |
258 | } | |
259 | PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j); | |
260 | while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) | |
261 | dev_kfree_skb_any(lb); | |
262 | while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) | |
263 | dev_kfree_skb_any(sb); | |
264 | free_scq(card->scq0, NULL); | |
265 | for (j = 0; j < NS_FRSCD_NUM; j++) | |
266 | { | |
267 | if (card->scd2vc[j] != NULL) | |
268 | free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc); | |
269 | } | |
270 | kfree(card->rsq.org); | |
271 | kfree(card->tsq.org); | |
272 | free_irq(card->pcidev->irq, card); | |
273 | iounmap(card->membase); | |
274 | kfree(card); | |
275 | } | |
276 | ||
277 | ||
278 | ||
279 | static struct pci_device_id nicstar_pci_tbl[] __devinitdata = | |
280 | { | |
281 | {PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201, | |
282 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, | |
283 | {0,} /* terminate list */ | |
284 | }; | |
285 | MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl); | |
286 | ||
287 | ||
288 | ||
289 | static struct pci_driver nicstar_driver = { | |
290 | .name = "nicstar", | |
291 | .id_table = nicstar_pci_tbl, | |
292 | .probe = nicstar_init_one, | |
293 | .remove = __devexit_p(nicstar_remove_one), | |
294 | }; | |
295 | ||
296 | ||
297 | ||
298 | static int __init nicstar_init(void) | |
299 | { | |
300 | unsigned error = 0; /* Initialized to remove compile warning */ | |
301 | ||
302 | XPRINTK("nicstar: nicstar_init() called.\n"); | |
303 | ||
304 | error = pci_register_driver(&nicstar_driver); | |
305 | ||
306 | TXPRINTK("nicstar: TX debug enabled.\n"); | |
307 | RXPRINTK("nicstar: RX debug enabled.\n"); | |
308 | PRINTK("nicstar: General debug enabled.\n"); | |
309 | #ifdef PHY_LOOPBACK | |
310 | printk("nicstar: using PHY loopback.\n"); | |
311 | #endif /* PHY_LOOPBACK */ | |
312 | XPRINTK("nicstar: nicstar_init() returned.\n"); | |
313 | ||
314 | if (!error) { | |
315 | init_timer(&ns_timer); | |
316 | ns_timer.expires = jiffies + NS_POLL_PERIOD; | |
317 | ns_timer.data = 0UL; | |
318 | ns_timer.function = ns_poll; | |
319 | add_timer(&ns_timer); | |
320 | } | |
321 | ||
322 | return error; | |
323 | } | |
324 | ||
325 | ||
326 | ||
327 | static void __exit nicstar_cleanup(void) | |
328 | { | |
329 | XPRINTK("nicstar: nicstar_cleanup() called.\n"); | |
330 | ||
331 | del_timer(&ns_timer); | |
332 | ||
333 | pci_unregister_driver(&nicstar_driver); | |
334 | ||
335 | XPRINTK("nicstar: nicstar_cleanup() returned.\n"); | |
336 | } | |
337 | ||
338 | ||
339 | ||
340 | static u32 ns_read_sram(ns_dev *card, u32 sram_address) | |
341 | { | |
342 | unsigned long flags; | |
343 | u32 data; | |
344 | sram_address <<= 2; | |
345 | sram_address &= 0x0007FFFC; /* address must be dword aligned */ | |
346 | sram_address |= 0x50000000; /* SRAM read command */ | |
347 | spin_lock_irqsave(&card->res_lock, flags); |
348 | while (CMD_BUSY(card)); |
349 | writel(sram_address, card->membase + CMD); | |
350 | while (CMD_BUSY(card)); | |
351 | data = readl(card->membase + DR0); | |
352 | spin_unlock_irqrestore(&card->res_lock, flags); | |
353 | return data; | |
354 | } | |
355 | ||
356 | ||
357 | ||
358 | static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count) | |
359 | { | |
360 | unsigned long flags; | |
361 | int i, c; | |
362 | count--; /* count range now is 0..3 instead of 1..4 */ | |
363 | c = count; | |
364 | c <<= 2; /* to use increments of 4 */ | |
365 | spin_lock_irqsave(&card->res_lock, flags); |
366 | while (CMD_BUSY(card)); |
367 | for (i = 0; i <= c; i += 4) | |
368 | writel(*(value++), card->membase + i); | |
369 | /* Note: DR# registers are the first 4 dwords in nicstar's memspace, | |
370 | so card->membase + DR0 == card->membase */ | |
371 | sram_address <<= 2; | |
372 | sram_address &= 0x0007FFFC; | |
373 | sram_address |= (0x40000000 | count); | |
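/* 0x4 in the top nibble selects the write-SRAM command (the read path in
   ns_read_sram() uses 0x5); the low bits hold the decremented burst count,
   telling the card how many of DR0..DR3 to copy into consecutive SRAM words. */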
374 | writel(sram_address, card->membase + CMD); | |
375 | spin_unlock_irqrestore(&card->res_lock, flags); | |
376 | } | |
377 | ||
378 | ||
379 | static int __devinit ns_init_card(int i, struct pci_dev *pcidev) | |
380 | { | |
381 | int j; | |
382 | struct ns_dev *card = NULL; | |
383 | unsigned char pci_latency; | |
384 | unsigned error; | |
385 | u32 data; | |
386 | u32 u32d[4]; | |
387 | u32 ns_cfg_rctsize; | |
388 | int bcount; | |
389 | unsigned long membase; | |
390 | ||
391 | error = 0; | |
392 | ||
393 | if (pci_enable_device(pcidev)) | |
394 | { | |
395 | printk("nicstar%d: can't enable PCI device\n", i); | |
396 | error = 2; | |
397 | ns_init_card_error(card, error); | |
398 | return error; | |
399 | } | |
400 | ||
401 | if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL) | |
402 | { | |
403 | printk("nicstar%d: can't allocate memory for device structure.\n", i); | |
404 | error = 2; | |
405 | ns_init_card_error(card, error); | |
406 | return error; | |
407 | } | |
408 | cards[i] = card; | |
409 | spin_lock_init(&card->int_lock); | |
410 | spin_lock_init(&card->res_lock); | |
411 | ||
412 | pci_set_drvdata(pcidev, card); | |
413 | ||
414 | card->index = i; | |
415 | card->atmdev = NULL; | |
416 | card->pcidev = pcidev; | |
417 | membase = pci_resource_start(pcidev, 1); | |
418 | card->membase = ioremap(membase, NS_IOREMAP_SIZE); | |
419 | if (!card->membase) |
420 | { |
421 | printk("nicstar%d: can't ioremap() membase.\n",i); | |
422 | error = 3; | |
423 | ns_init_card_error(card, error); | |
424 | return error; | |
425 | } | |
426 | PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase); | |
427 | ||
428 | pci_set_master(pcidev); | |
429 | ||
430 | if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) | |
431 | { | |
432 | printk("nicstar%d: can't read PCI latency timer.\n", i); | |
433 | error = 6; | |
434 | ns_init_card_error(card, error); | |
435 | return error; | |
436 | } | |
437 | #ifdef NS_PCI_LATENCY | |
438 | if (pci_latency < NS_PCI_LATENCY) | |
439 | { | |
440 | PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY); | |
441 | for (j = 1; j < 4; j++) | |
442 | { | |
443 | if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) | |
444 | break; | |
445 | } | |
446 | if (j == 4) | |
447 | { | |
448 | printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY); | |
449 | error = 7; | |
450 | ns_init_card_error(card, error); | |
451 | return error; | |
452 | } | |
453 | } | |
454 | #endif /* NS_PCI_LATENCY */ | |
455 | ||
456 | /* Clear timer overflow */ | |
457 | data = readl(card->membase + STAT); | |
458 | if (data & NS_STAT_TMROF) | |
459 | writel(NS_STAT_TMROF, card->membase + STAT); | |
460 | ||
461 | /* Software reset */ | |
462 | writel(NS_CFG_SWRST, card->membase + CFG); | |
463 | NS_DELAY; | |
464 | writel(0x00000000, card->membase + CFG); | |
465 | ||
466 | /* PHY reset */ | |
467 | writel(0x00000008, card->membase + GP); | |
468 | NS_DELAY; | |
469 | writel(0x00000001, card->membase + GP); | |
470 | NS_DELAY; | |
471 | while (CMD_BUSY(card)); | |
472 | writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ | |
473 | NS_DELAY; | |
474 | ||
475 | /* Detect PHY type */ | |
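/* The utility-bus read below returns a PHY identification value: 0x09 is
   treated as a 25.6 Mbps PHY (presumably the IDT77105) and 0x30/0x31 as a
   155 Mbps PHY (presumably a SUNI), matching the PHY drivers included above. */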
476 | while (CMD_BUSY(card)); | |
477 | writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); | |
478 | while (CMD_BUSY(card)); | |
479 | data = readl(card->membase + DR0); | |
480 | switch(data) { | |
481 | case 0x00000009: | |
482 | printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); | |
483 | card->max_pcr = ATM_25_PCR; | |
484 | while(CMD_BUSY(card)); | |
485 | writel(0x00000008, card->membase + DR0); | |
486 | writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); | |
487 | /* Clear any pending interrupt */ |
488 | writel(NS_STAT_SFBQF, card->membase + STAT); | |
489 | #ifdef PHY_LOOPBACK | |
490 | while(CMD_BUSY(card)); | |
491 | writel(0x00000022, card->membase + DR0); | |
492 | writel(NS_CMD_WRITE_UTILITY | 0x00000202, card->membase + CMD); | |
493 | #endif /* PHY_LOOPBACK */ | |
494 | break; | |
495 | case 0x00000030: | |
496 | case 0x00000031: | |
497 | printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); | |
498 | card->max_pcr = ATM_OC3_PCR; | |
499 | #ifdef PHY_LOOPBACK | |
500 | while(CMD_BUSY(card)); | |
501 | writel(0x00000002, card->membase + DR0); | |
502 | writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); | |
503 | #endif /* PHY_LOOPBACK */ | |
504 | break; | |
505 | default: | |
506 | printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); | |
507 | error = 8; | |
508 | ns_init_card_error(card, error); | |
509 | return error; | |
510 | } | |
511 | writel(0x00000000, card->membase + GP); | |
512 | ||
513 | /* Determine SRAM size */ | |
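/* Word addresses 0x14003 and 0x1C003 are 32K words apart, so on a card with
   only 32K words of SRAM they alias the same cell and the second write below
   clobbers the first; only a 128K-word part keeps both test patterns intact. */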
514 | data = 0x76543210; | |
515 | ns_write_sram(card, 0x1C003, &data, 1); | |
516 | data = 0x89ABCDEF; | |
517 | ns_write_sram(card, 0x14003, &data, 1); | |
518 | if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && | |
519 | ns_read_sram(card, 0x1C003) == 0x76543210) | |
520 | card->sram_size = 128; | |
521 | else | |
522 | card->sram_size = 32; | |
523 | PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); | |
524 | ||
525 | card->rct_size = NS_MAX_RCTSIZE; | |
526 | ||
527 | #if (NS_MAX_RCTSIZE == 4096) | |
528 | if (card->sram_size == 128) | |
529 | printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i); | |
530 | #elif (NS_MAX_RCTSIZE == 16384) | |
531 | if (card->sram_size == 32) | |
532 | { | |
533 | printk("nicstar%d: wasting memory. See NS_MAX_RCTSIZE in nicstar.h\n", i); | |
534 | card->rct_size = 4096; | |
535 | } | |
536 | #else | |
537 | #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c | |
538 | #endif | |
539 | ||
540 | card->vpibits = NS_VPIBITS; | |
541 | if (card->rct_size == 4096) | |
542 | card->vcibits = 12 - NS_VPIBITS; | |
543 | else /* card->rct_size == 16384 */ | |
544 | card->vcibits = 14 - NS_VPIBITS; | |
545 | ||
546 | /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ | |
547 | if (mac[i] == NULL) | |
548 | nicstar_init_eprom(card->membase); | |
549 | ||
550 | /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ |
551 | writel(0x00000000, card->membase + VPM); | |
552 | ||
553 | /* Initialize TSQ */ | |
554 | card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL); | |
555 | if (card->tsq.org == NULL) | |
556 | { | |
557 | printk("nicstar%d: can't allocate TSQ.\n", i); | |
558 | error = 10; | |
559 | ns_init_card_error(card, error); | |
560 | return error; | |
561 | } | |
562 | card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT); | |
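/* The allocation above is padded by NS_TSQ_ALIGNMENT so that an aligned,
   NS_TSQSIZE-byte queue is guaranteed to fit inside the kmalloc'd block. */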
563 | card->tsq.next = card->tsq.base; | |
564 | card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); | |
565 | for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) | |
566 | ns_tsi_init(card->tsq.base + j); | |
567 | writel(0x00000000, card->membase + TSQH); | |
568 | writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB); | |
569 | PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, (u32) card->tsq.base, | |
570 | (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB)); | |
571 | ||
572 | /* Initialize RSQ */ | |
573 | card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL); | |
574 | if (card->rsq.org == NULL) | |
575 | { | |
576 | printk("nicstar%d: can't allocate RSQ.\n", i); | |
577 | error = 11; | |
578 | ns_init_card_error(card, error); | |
579 | return error; | |
580 | } | |
581 | card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT); | |
582 | card->rsq.next = card->rsq.base; | |
583 | card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); | |
584 | for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) | |
585 | ns_rsqe_init(card->rsq.base + j); | |
586 | writel(0x00000000, card->membase + RSQH); | |
587 | writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB); | |
588 | PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base); | |
589 | ||
590 | /* Initialize SCQ0, the only VBR SCQ used */ | |
591 | card->scq1 = NULL; |
592 | card->scq2 = NULL; |
593 | card->scq0 = get_scq(VBR_SCQSIZE, NS_VRSCD0); |
594 | if (card->scq0 == NULL) |
595 | { |
596 | printk("nicstar%d: can't get SCQ0.\n", i); | |
597 | error = 12; | |
598 | ns_init_card_error(card, error); | |
599 | return error; | |
600 | } | |
601 | u32d[0] = (u32) virt_to_bus(card->scq0->base); | |
602 | u32d[1] = (u32) 0x00000000; | |
603 | u32d[2] = (u32) 0xffffffff; | |
604 | u32d[3] = (u32) 0x00000000; | |
605 | ns_write_sram(card, NS_VRSCD0, u32d, 4); | |
606 | ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ | |
607 | ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */ | |
608 | card->scq0->scd = NS_VRSCD0; | |
609 | PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base); | |
610 | ||
611 | /* Initialize TSTs */ | |
612 | card->tst_addr = NS_TST0; | |
613 | card->tst_free_entries = NS_TST_NUM_ENTRIES; | |
614 | data = NS_TST_OPCODE_VARIABLE; | |
615 | for (j = 0; j < NS_TST_NUM_ENTRIES; j++) | |
616 | ns_write_sram(card, NS_TST0 + j, &data, 1); | |
617 | data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); | |
618 | ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); | |
619 | for (j = 0; j < NS_TST_NUM_ENTRIES; j++) | |
620 | ns_write_sram(card, NS_TST1 + j, &data, 1); | |
621 | data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); | |
622 | ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); | |
623 | for (j = 0; j < NS_TST_NUM_ENTRIES; j++) | |
624 | card->tste2vc[j] = NULL; | |
625 | writel(NS_TST0 << 2, card->membase + TSTB); | |
626 | ||
627 | ||
628 | /* Initialize RCT. AAL type is set on opening the VC. */ | |
629 | #ifdef RCQ_SUPPORT | |
630 | u32d[0] = NS_RCTE_RAWCELLINTEN; | |
631 | #else | |
632 | u32d[0] = 0x00000000; | |
633 | #endif /* RCQ_SUPPORT */ | |
634 | u32d[1] = 0x00000000; | |
635 | u32d[2] = 0x00000000; | |
636 | u32d[3] = 0xFFFFFFFF; | |
637 | for (j = 0; j < card->rct_size; j++) | |
638 | ns_write_sram(card, j * 4, u32d, 4); | |
639 | ||
640 | memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); | |
641 | ||
642 | for (j = 0; j < NS_FRSCD_NUM; j++) | |
643 | card->scd2vc[j] = NULL; | |
644 | ||
645 | /* Initialize buffer levels */ | |
646 | card->sbnr.min = MIN_SB; | |
647 | card->sbnr.init = NUM_SB; | |
648 | card->sbnr.max = MAX_SB; | |
649 | card->lbnr.min = MIN_LB; | |
650 | card->lbnr.init = NUM_LB; | |
651 | card->lbnr.max = MAX_LB; | |
652 | card->iovnr.min = MIN_IOVB; | |
653 | card->iovnr.init = NUM_IOVB; | |
654 | card->iovnr.max = MAX_IOVB; | |
655 | card->hbnr.min = MIN_HB; | |
656 | card->hbnr.init = NUM_HB; | |
657 | card->hbnr.max = MAX_HB; | |
658 | ||
659 | card->sm_handle = 0x00000000; | |
660 | card->sm_addr = 0x00000000; | |
661 | card->lg_handle = 0x00000000; | |
662 | card->lg_addr = 0x00000000; | |
663 | ||
664 | card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ | |
665 | ||
666 | /* Pre-allocate some huge buffers */ | |
667 | skb_queue_head_init(&card->hbpool.queue); | |
668 | card->hbpool.count = 0; | |
669 | for (j = 0; j < NUM_HB; j++) | |
670 | { | |
671 | struct sk_buff *hb; | |
672 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); | |
673 | if (hb == NULL) | |
674 | { | |
675 | printk("nicstar%d: can't allocate %dth of %d huge buffers.\n", | |
676 | i, j, NUM_HB); | |
677 | error = 13; | |
678 | ns_init_card_error(card, error); | |
679 | return error; | |
680 | } | |
681 | NS_SKB_CB(hb)->buf_type = BUF_NONE; |
682 | skb_queue_tail(&card->hbpool.queue, hb); |
683 | card->hbpool.count++; | |
684 | } | |
685 | ||
686 | ||
687 | /* Allocate large buffers */ | |
688 | skb_queue_head_init(&card->lbpool.queue); | |
689 | card->lbpool.count = 0; /* Not used */ | |
690 | for (j = 0; j < NUM_LB; j++) | |
691 | { | |
692 | struct sk_buff *lb; | |
693 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); | |
694 | if (lb == NULL) | |
695 | { | |
696 | printk("nicstar%d: can't allocate %dth of %d large buffers.\n", | |
697 | i, j, NUM_LB); | |
698 | error = 14; | |
699 | ns_init_card_error(card, error); | |
700 | return error; | |
701 | } | |
702 | NS_SKB_CB(lb)->buf_type = BUF_LG; |
703 | skb_queue_tail(&card->lbpool.queue, lb); |
704 | skb_reserve(lb, NS_SMBUFSIZE); | |
705 | push_rxbufs(card, lb); |
706 | /* Due to the implementation of push_rxbufs() this is 1, not 0 */ |
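/* (push_rxbufs() hands large buffers to the card in pairs, so nothing has
   actually been queued until the j == 1 iteration; the buffer pushed then is
   recorded as the initial raw-cell buffer in card->rcbuf/rawch.) */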
707 | if (j == 1) | |
708 | { | |
709 | card->rcbuf = lb; | |
710 | card->rawch = (u32) virt_to_bus(lb->data); | |
711 | } | |
712 | } | |
713 | /* Test for strange behaviour which leads to crashes */ | |
714 | if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) | |
715 | { | |
716 | printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", | |
717 | i, j, bcount); | |
718 | error = 14; | |
719 | ns_init_card_error(card, error); | |
720 | return error; | |
721 | } | |
722 | ||
723 | ||
724 | /* Allocate small buffers */ | |
725 | skb_queue_head_init(&card->sbpool.queue); | |
726 | card->sbpool.count = 0; /* Not used */ | |
727 | for (j = 0; j < NUM_SB; j++) | |
728 | { | |
729 | struct sk_buff *sb; | |
730 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | |
731 | if (sb == NULL) | |
732 | { | |
733 | printk("nicstar%d: can't allocate %dth of %d small buffers.\n", | |
734 | i, j, NUM_SB); | |
735 | error = 15; | |
736 | ns_init_card_error(card, error); | |
737 | return error; | |
738 | } | |
739 | NS_SKB_CB(sb)->buf_type = BUF_SM; |
740 | skb_queue_tail(&card->sbpool.queue, sb); |
741 | skb_reserve(sb, NS_AAL0_HEADER); | |
742 | push_rxbufs(card, sb); |
743 | } |
744 | /* Test for strange behaviour which leads to crashes */ | |
745 | if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) | |
746 | { | |
747 | printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n", | |
748 | i, j, bcount); | |
749 | error = 15; | |
750 | ns_init_card_error(card, error); | |
751 | return error; | |
752 | } | |
753 | ||
754 | ||
755 | /* Allocate iovec buffers */ | |
756 | skb_queue_head_init(&card->iovpool.queue); | |
757 | card->iovpool.count = 0; | |
758 | for (j = 0; j < NUM_IOVB; j++) | |
759 | { | |
760 | struct sk_buff *iovb; | |
761 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); | |
762 | if (iovb == NULL) | |
763 | { | |
764 | printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n", | |
765 | i, j, NUM_IOVB); | |
766 | error = 16; | |
767 | ns_init_card_error(card, error); | |
768 | return error; | |
769 | } | |
770 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; |
771 | skb_queue_tail(&card->iovpool.queue, iovb); |
772 | card->iovpool.count++; | |
773 | } | |
774 | ||
775 | /* Configure NICStAR */ |
776 | if (card->rct_size == 4096) | |
777 | ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; | |
778 | else /* (card->rct_size == 16384) */ | |
779 | ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; | |
780 | ||
781 | card->efbie = 1; | |
782 | |
783 | card->intcnt = 0; | |
784 | if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0) | |
785 | { | |
786 | printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); | |
787 | error = 9; | |
788 | ns_init_card_error(card, error); | |
789 | return error; | |
790 | } | |
791 | |
792 | /* Register device */ | |
793 | card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); | |
794 | if (card->atmdev == NULL) | |
795 | { | |
796 | printk("nicstar%d: can't register device.\n", i); | |
797 | error = 17; | |
798 | ns_init_card_error(card, error); | |
799 | return error; | |
800 | } | |
801 | ||
802 | if (ns_parse_mac(mac[i], card->atmdev->esi)) { | |
803 | nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, | |
804 | card->atmdev->esi, 6); | |
805 | if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 0) { | |
806 | nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, | |
807 | card->atmdev->esi, 6); | |
808 | } | |
809 | } | |
810 | ||
811 | printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); |
812 | |
813 | card->atmdev->dev_data = card; | |
814 | card->atmdev->ci_range.vpi_bits = card->vpibits; | |
815 | card->atmdev->ci_range.vci_bits = card->vcibits; | |
816 | card->atmdev->link_rate = card->max_pcr; | |
817 | card->atmdev->phy = NULL; | |
818 | ||
819 | #ifdef CONFIG_ATM_NICSTAR_USE_SUNI | |
820 | if (card->max_pcr == ATM_OC3_PCR) | |
821 | suni_init(card->atmdev); | |
822 | #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ | |
823 | ||
824 | #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 | |
825 | if (card->max_pcr == ATM_25_PCR) | |
826 | idt77105_init(card->atmdev); | |
827 | #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ | |
828 | ||
829 | if (card->atmdev->phy && card->atmdev->phy->start) | |
830 | card->atmdev->phy->start(card->atmdev); | |
831 | ||
832 | writel(NS_CFG_RXPATH | | |
833 | NS_CFG_SMBUFSIZE | | |
834 | NS_CFG_LGBUFSIZE | | |
835 | NS_CFG_EFBIE | | |
836 | NS_CFG_RSQSIZE | | |
837 | NS_CFG_VPIBITS | | |
838 | ns_cfg_rctsize | | |
839 | NS_CFG_RXINT_NODELAY | | |
840 | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ | |
841 | NS_CFG_RSQAFIE | | |
842 | NS_CFG_TXEN | | |
843 | NS_CFG_TXIE | | |
844 | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ | |
845 | NS_CFG_PHYIE, | |
846 | card->membase + CFG); | |
847 | ||
848 | num_cards++; | |
849 | ||
850 | return error; | |
851 | } | |
852 | ||
853 | ||
854 | ||
855 | static void __devinit ns_init_card_error(ns_dev *card, int error) | |
856 | { | |
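/* 'error' encodes how far ns_init_card() got before failing; each threshold
   below undoes one of the earlier setup steps. */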
857 | if (error >= 17) | |
858 | { | |
859 | writel(0x00000000, card->membase + CFG); | |
860 | } | |
861 | if (error >= 16) | |
862 | { | |
863 | struct sk_buff *iovb; | |
864 | while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) | |
865 | dev_kfree_skb_any(iovb); | |
866 | } | |
867 | if (error >= 15) | |
868 | { | |
869 | struct sk_buff *sb; | |
870 | while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) | |
871 | dev_kfree_skb_any(sb); | |
872 | free_scq(card->scq0, NULL); | |
873 | } | |
874 | if (error >= 14) | |
875 | { | |
876 | struct sk_buff *lb; | |
877 | while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL) | |
878 | dev_kfree_skb_any(lb); | |
879 | } | |
880 | if (error >= 13) | |
881 | { | |
882 | struct sk_buff *hb; | |
883 | while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) | |
884 | dev_kfree_skb_any(hb); | |
885 | } | |
886 | if (error >= 12) | |
887 | { | |
888 | kfree(card->rsq.org); | |
889 | } | |
890 | if (error >= 11) | |
891 | { | |
892 | kfree(card->tsq.org); | |
893 | } | |
894 | if (error >= 10) | |
895 | { | |
896 | free_irq(card->pcidev->irq, card); | |
897 | } | |
898 | if (error >= 4) | |
899 | { | |
900 | iounmap(card->membase); | |
901 | } | |
902 | if (error >= 3) | |
903 | { | |
904 | pci_disable_device(card->pcidev); | |
905 | kfree(card); | |
906 | } | |
907 | } | |
908 | ||
909 | ||
910 | ||
911 | static scq_info *get_scq(int size, u32 scd) | |
912 | { | |
913 | scq_info *scq; | |
914 | int i; | |
915 | ||
916 | if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) | |
917 | return NULL; |
918 | ||
919 | scq = kmalloc(sizeof(scq_info), GFP_KERNEL); |
920 | if (scq == NULL) |
921 | return NULL; | |
922 | scq->org = kmalloc(2 * size, GFP_KERNEL); |
923 | if (scq->org == NULL) | |
924 | { | |
925 | kfree(scq); | |
926 | return NULL; |
927 | } |
928 | scq->skb = kmalloc(sizeof(struct sk_buff *) * |
929 | (size / NS_SCQE_SIZE), GFP_KERNEL); |
930 | if (scq->skb == NULL) |
931 | { |
932 | kfree(scq->org); | |
933 | kfree(scq); | |
934 | return NULL; |
935 | } |
936 | scq->num_entries = size / NS_SCQE_SIZE; | |
937 | scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size); | |
938 | scq->next = scq->base; | |
939 | scq->last = scq->base + (scq->num_entries - 1); | |
940 | scq->tail = scq->last; | |
941 | scq->scd = scd; | |
942 | scq->num_entries = size / NS_SCQE_SIZE; | |
943 | scq->tbd_count = 0; | |
944 | init_waitqueue_head(&scq->scqfull_waitq); | |
945 | scq->full = 0; | |
946 | spin_lock_init(&scq->lock); | |
947 | ||
948 | for (i = 0; i < scq->num_entries; i++) | |
949 | scq->skb[i] = NULL; | |
950 | ||
951 | return scq; | |
952 | } | |
953 | ||
954 | ||
955 | ||
956 | /* For variable rate SCQ vcc must be NULL */ | |
957 | static void free_scq(scq_info *scq, struct atm_vcc *vcc) | |
958 | { | |
959 | int i; | |
960 | ||
961 | if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) | |
962 | for (i = 0; i < scq->num_entries; i++) | |
963 | { | |
964 | if (scq->skb[i] != NULL) | |
965 | { | |
966 | vcc = ATM_SKB(scq->skb[i])->vcc; | |
967 | if (vcc->pop != NULL) | |
968 | vcc->pop(vcc, scq->skb[i]); | |
969 | else | |
970 | dev_kfree_skb_any(scq->skb[i]); | |
971 | } | |
972 | } | |
973 | else /* vcc must be != NULL */ | |
974 | { | |
975 | if (vcc == NULL) | |
976 | { | |
977 | printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); | |
978 | for (i = 0; i < scq->num_entries; i++) | |
979 | dev_kfree_skb_any(scq->skb[i]); | |
980 | } | |
981 | else | |
982 | for (i = 0; i < scq->num_entries; i++) | |
983 | { | |
984 | if (scq->skb[i] != NULL) | |
985 | { | |
986 | if (vcc->pop != NULL) | |
987 | vcc->pop(vcc, scq->skb[i]); | |
988 | else | |
989 | dev_kfree_skb_any(scq->skb[i]); | |
990 | } | |
991 | } | |
992 | } | |
993 | kfree(scq->skb); | |
994 | kfree(scq->org); | |
995 | kfree(scq); | |
996 | } | |
997 | ||
998 | ||
999 | ||
1000 | /* The handles passed must be pointers to the sk_buff containing the small | |
1001 | or large buffer(s) cast to u32. */ | |
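/* (The handle is an opaque cookie to the card: it is written to the free
   buffer queue together with the DMA address and handed back in receive
   status entries, so the driver can recover the sk_buff with a plain cast.) */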
1002 | static void push_rxbufs(ns_dev *card, struct sk_buff *skb) |
1003 | { |
1004 | struct ns_skb_cb *cb = NS_SKB_CB(skb); |
1005 | u32 handle1, addr1; | |
1006 | u32 handle2, addr2; | |
1007 | u32 stat; |
1008 | unsigned long flags; | |
1009 | ||
1010 | /* *BARF* */ |
1011 | handle2 = addr2 = 0; | |
1012 | handle1 = (u32)skb; | |
1013 | addr1 = (u32)virt_to_bus(skb->data); | |
1014 | |
1015 | #ifdef GENERAL_DEBUG | |
1016 | if (!addr1) | |
1017 | printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index); | |
1018 | #endif /* GENERAL_DEBUG */ | |
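/* Buffers are given to the card two at a time: a single buffer is parked in
   card->sm_addr/sm_handle (or lg_addr/lg_handle) until its partner arrives,
   and only then is the free buffer queue actually written. */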
1019 | ||
1020 | stat = readl(card->membase + STAT); | |
1021 | card->sbfqc = ns_stat_sfbqc_get(stat); | |
1022 | card->lbfqc = ns_stat_lfbqc_get(stat); | |
1023 | if (cb->buf_type == BUF_SM) |
1024 | { |
1025 | if (!addr2) | |
1026 | { | |
1027 | if (card->sm_addr) | |
1028 | { | |
1029 | addr2 = card->sm_addr; | |
1030 | handle2 = card->sm_handle; | |
1031 | card->sm_addr = 0x00000000; | |
1032 | card->sm_handle = 0x00000000; | |
1033 | } | |
1034 | else /* (!sm_addr) */ | |
1035 | { | |
1036 | card->sm_addr = addr1; | |
1037 | card->sm_handle = handle1; | |
1038 | } | |
1039 | } | |
1040 | } | |
1041 | else /* buf_type == BUF_LG */ |
1042 | { |
1043 | if (!addr2) | |
1044 | { | |
1045 | if (card->lg_addr) | |
1046 | { | |
1047 | addr2 = card->lg_addr; | |
1048 | handle2 = card->lg_handle; | |
1049 | card->lg_addr = 0x00000000; | |
1050 | card->lg_handle = 0x00000000; | |
1051 | } | |
1052 | else /* (!lg_addr) */ | |
1053 | { | |
1054 | card->lg_addr = addr1; | |
1055 | card->lg_handle = handle1; | |
1056 | } | |
1057 | } | |
1058 | } | |
1059 | ||
1060 | if (addr2) | |
1061 | { | |
1062 | if (cb->buf_type == BUF_SM) |
1063 | { |
1064 | if (card->sbfqc >= card->sbnr.max) | |
1065 | { | |
1066 | skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue); |
1067 | dev_kfree_skb_any((struct sk_buff *) handle1); |
1068 | skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue); |
1069 | dev_kfree_skb_any((struct sk_buff *) handle2); |
1070 | return; | |
1071 | } | |
1072 | else | |
1073 | card->sbfqc += 2; | |
1074 | } | |
1075 | else /* (buf_type == BUF_LG) */ |
1076 | { |
1077 | if (card->lbfqc >= card->lbnr.max) | |
1078 | { | |
1079 | skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue); |
1080 | dev_kfree_skb_any((struct sk_buff *) handle1); |
1081 | skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue); |
1082 | dev_kfree_skb_any((struct sk_buff *) handle2); |
1083 | return; | |
1084 | } | |
1085 | else | |
1086 | card->lbfqc += 2; | |
1087 | } | |
1088 | ||
1089 | spin_lock_irqsave(&card->res_lock, flags); |
1090 | |
1091 | while (CMD_BUSY(card)); | |
1092 | writel(addr2, card->membase + DR3); | |
1093 | writel(handle2, card->membase + DR2); | |
1094 | writel(addr1, card->membase + DR1); | |
1095 | writel(handle1, card->membase + DR0); | |
1096 | writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD); |
1097 | |
1098 | spin_unlock_irqrestore(&card->res_lock, flags); | |
1099 | ||
1100 | XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, | |
1101 | (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2); |
1102 | } |
1103 | ||
1104 | if (!card->efbie && card->sbfqc >= card->sbnr.min && | |
1105 | card->lbfqc >= card->lbnr.min) | |
1106 | { | |
1107 | card->efbie = 1; | |
1108 | writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG); | |
1109 | } | |
1110 | ||
1111 | return; | |
1112 | } | |
1113 | ||
1114 | ||
1115 | ||
1116 | static irqreturn_t ns_irq_handler(int irq, void *dev_id) |
1117 | { |
1118 | u32 stat_r; | |
1119 | ns_dev *card; | |
1120 | struct atm_dev *dev; | |
1121 | unsigned long flags; | |
1122 | ||
1123 | card = (ns_dev *) dev_id; | |
1124 | dev = card->atmdev; | |
1125 | card->intcnt++; | |
1126 | ||
1127 | PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); | |
1128 | ||
1129 | spin_lock_irqsave(&card->int_lock, flags); |
1130 | |
1131 | stat_r = readl(card->membase + STAT); | |
1132 | ||
1133 | /* Transmit Status Indicator has been written to T. S. Queue */ | |
1134 | if (stat_r & NS_STAT_TSIF) | |
1135 | { | |
1136 | TXPRINTK("nicstar%d: TSI interrupt\n", card->index); | |
1137 | process_tsq(card); | |
1138 | writel(NS_STAT_TSIF, card->membase + STAT); | |
1139 | } | |
1140 | ||
1141 | /* Incomplete CS-PDU has been transmitted */ | |
1142 | if (stat_r & NS_STAT_TXICP) | |
1143 | { | |
1144 | writel(NS_STAT_TXICP, card->membase + STAT); | |
1145 | TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", | |
1146 | card->index); | |
1147 | } | |
1148 | ||
1149 | /* Transmit Status Queue 7/8 full */ | |
1150 | if (stat_r & NS_STAT_TSQF) | |
1151 | { | |
1152 | writel(NS_STAT_TSQF, card->membase + STAT); | |
1153 | PRINTK("nicstar%d: TSQ full.\n", card->index); | |
1154 | process_tsq(card); | |
1155 | } | |
1156 | ||
1157 | /* Timer overflow */ | |
1158 | if (stat_r & NS_STAT_TMROF) | |
1159 | { | |
1160 | writel(NS_STAT_TMROF, card->membase + STAT); | |
1161 | PRINTK("nicstar%d: Timer overflow.\n", card->index); | |
1162 | } | |
1163 | ||
1164 | /* PHY device interrupt signal active */ | |
1165 | if (stat_r & NS_STAT_PHYI) | |
1166 | { | |
1167 | writel(NS_STAT_PHYI, card->membase + STAT); | |
1168 | PRINTK("nicstar%d: PHY interrupt.\n", card->index); | |
1169 | if (dev->phy && dev->phy->interrupt) { | |
1170 | dev->phy->interrupt(dev); | |
1171 | } | |
1172 | } | |
1173 | ||
1174 | /* Small Buffer Queue is full */ | |
1175 | if (stat_r & NS_STAT_SFBQF) | |
1176 | { | |
1177 | writel(NS_STAT_SFBQF, card->membase + STAT); | |
1178 | printk("nicstar%d: Small free buffer queue is full.\n", card->index); | |
1179 | } | |
1180 | ||
1181 | /* Large Buffer Queue is full */ | |
1182 | if (stat_r & NS_STAT_LFBQF) | |
1183 | { | |
1184 | writel(NS_STAT_LFBQF, card->membase + STAT); | |
1185 | printk("nicstar%d: Large free buffer queue is full.\n", card->index); | |
1186 | } | |
1187 | ||
1188 | /* Receive Status Queue is full */ | |
1189 | if (stat_r & NS_STAT_RSQF) | |
1190 | { | |
1191 | writel(NS_STAT_RSQF, card->membase + STAT); | |
1192 | printk("nicstar%d: RSQ full.\n", card->index); | |
1193 | process_rsq(card); | |
1194 | } | |
1195 | ||
1196 | /* Complete CS-PDU received */ | |
1197 | if (stat_r & NS_STAT_EOPDU) | |
1198 | { | |
1199 | RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); | |
1200 | process_rsq(card); | |
1201 | writel(NS_STAT_EOPDU, card->membase + STAT); | |
1202 | } | |
1203 | ||
1204 | /* Raw cell received */ | |
1205 | if (stat_r & NS_STAT_RAWCF) | |
1206 | { | |
1207 | writel(NS_STAT_RAWCF, card->membase + STAT); | |
1208 | #ifndef RCQ_SUPPORT | |
1209 | printk("nicstar%d: Raw cell received and no support yet...\n", | |
1210 | card->index); | |
1211 | #endif /* RCQ_SUPPORT */ | |
1212 | /* NOTE: the following procedure may keep a raw cell pending until the | |
1213 | next interrupt. As this preliminary support is only meant to | |
1214 | avoid buffer leakage, this is not an issue. */ | |
1215 | while (readl(card->membase + RAWCT) != card->rawch) | |
1216 | { | |
1217 | ns_rcqe *rawcell; | |
1218 | ||
1219 | rawcell = (ns_rcqe *) bus_to_virt(card->rawch); | |
1220 | if (ns_rcqe_islast(rawcell)) | |
1221 | { | |
1222 | struct sk_buff *oldbuf; | |
1223 | ||
1224 | oldbuf = card->rcbuf; | |
1225 | card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell); | |
1226 | card->rawch = (u32) virt_to_bus(card->rcbuf->data); | |
1227 | recycle_rx_buf(card, oldbuf); | |
1228 | } | |
1229 | else | |
1230 | card->rawch += NS_RCQE_SIZE; | |
1231 | } | |
1232 | } | |
1233 | ||
1234 | /* Small buffer queue is empty */ | |
1235 | if (stat_r & NS_STAT_SFBQE) | |
1236 | { | |
1237 | int i; | |
1238 | struct sk_buff *sb; | |
1239 | ||
1240 | writel(NS_STAT_SFBQE, card->membase + STAT); | |
1241 | printk("nicstar%d: Small free buffer queue empty.\n", | |
1242 | card->index); | |
1243 | for (i = 0; i < card->sbnr.min; i++) | |
1244 | { | |
1245 | sb = dev_alloc_skb(NS_SMSKBSIZE); | |
1246 | if (sb == NULL) | |
1247 | { | |
1248 | writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); | |
1249 | card->efbie = 0; | |
1250 | break; | |
1251 | } | |
1252 | NS_SKB_CB(sb)->buf_type = BUF_SM; |
1253 | skb_queue_tail(&card->sbpool.queue, sb); |
1254 | skb_reserve(sb, NS_AAL0_HEADER); | |
1255 | push_rxbufs(card, sb); |
1256 | } |
1257 | card->sbfqc = i; | |
1258 | process_rsq(card); | |
1259 | } | |
1260 | ||
1261 | /* Large buffer queue empty */ | |
1262 | if (stat_r & NS_STAT_LFBQE) | |
1263 | { | |
1264 | int i; | |
1265 | struct sk_buff *lb; | |
1266 | ||
1267 | writel(NS_STAT_LFBQE, card->membase + STAT); | |
1268 | printk("nicstar%d: Large free buffer queue empty.\n", | |
1269 | card->index); | |
1270 | for (i = 0; i < card->lbnr.min; i++) | |
1271 | { | |
1272 | lb = dev_alloc_skb(NS_LGSKBSIZE); | |
1273 | if (lb == NULL) | |
1274 | { | |
1275 | writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); | |
1276 | card->efbie = 0; | |
1277 | break; | |
1278 | } | |
1279 | NS_SKB_CB(lb)->buf_type = BUF_LG; |
1280 | skb_queue_tail(&card->lbpool.queue, lb); |
1281 | skb_reserve(lb, NS_SMBUFSIZE); | |
1282 | push_rxbufs(card, lb); |
1283 | } |
1284 | card->lbfqc = i; | |
1285 | process_rsq(card); | |
1286 | } | |
1287 | ||
1288 | /* Receive Status Queue is 7/8 full */ | |
1289 | if (stat_r & NS_STAT_RSQAF) | |
1290 | { | |
1291 | writel(NS_STAT_RSQAF, card->membase + STAT); | |
1292 | RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); | |
1293 | process_rsq(card); | |
1294 | } | |
1295 | ||
1296 | spin_unlock_irqrestore(&card->int_lock, flags); | |
1297 | PRINTK("nicstar%d: end of interrupt service\n", card->index); | |
1298 | return IRQ_HANDLED; | |
1299 | } | |
1300 | ||
1301 | ||
1302 | ||
1303 | static int ns_open(struct atm_vcc *vcc) | |
1304 | { | |
1305 | ns_dev *card; | |
1306 | vc_map *vc; | |
1307 | unsigned long tmpl, modl; | |
1308 | int tcr, tcra; /* target cell rate, and absolute value */ | |
1309 | int n = 0; /* Number of entries in the TST. Initialized to remove | |
1310 | the compiler warning. */ | |
1311 | u32 u32d[4]; | |
1312 | int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler | |
1313 | warning. How I wish compilers were clever enough to | |
1314 | tell which variables can truly be used | |
1315 | uninitialized... */ | |
1316 | int inuse; /* tx or rx vc already in use by another vcc */ | |
1317 | short vpi = vcc->vpi; | |
1318 | int vci = vcc->vci; | |
1319 | ||
1320 | card = (ns_dev *) vcc->dev->dev_data; | |
1321 | PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci); | |
1322 | if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) | |
1323 | { | |
1324 | PRINTK("nicstar%d: unsupported AAL.\n", card->index); | |
1325 | return -EINVAL; | |
1326 | } | |
1327 | ||
1328 | vc = &(card->vcmap[vpi << card->vcibits | vci]); | |
1329 | vcc->dev_data = vc; | |
1330 | ||
1331 | inuse = 0; | |
1332 | if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) | |
1333 | inuse = 1; | |
1334 | if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) | |
1335 | inuse += 2; | |
1336 | if (inuse) | |
1337 | { | |
1338 | printk("nicstar%d: %s vci already in use.\n", card->index, | |
1339 | inuse == 1 ? "tx" : inuse == 2 ? "rx" : "tx and rx"); | |
1340 | return -EINVAL; | |
1341 | } | |
1342 | ||
1343 | set_bit(ATM_VF_ADDR,&vcc->flags); | |
1344 | ||
1345 | /* NOTE: You are not allowed to modify an open connection's QOS. To change | |
1346 | that, remove the ATM_VF_PARTIAL flag checking. There may be other changes | |
1347 | needed to do that. */ | |
1348 | if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) | |
1349 | { | |
1350 | scq_info *scq; | |
1351 | ||
1352 | set_bit(ATM_VF_PARTIAL,&vcc->flags); | |
1353 | if (vcc->qos.txtp.traffic_class == ATM_CBR) | |
1354 | { | |
1355 | /* Check requested cell rate and availability of SCD */ | |
1356 | if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 && | |
1357 | vcc->qos.txtp.min_pcr == 0) | |
1358 | { | |
1359 | PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", | |
1360 | card->index); | |
1361 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | |
1362 | clear_bit(ATM_VF_ADDR,&vcc->flags); | |
1363 | return -EINVAL; | |
1364 | } | |
1365 | ||
1366 | tcr = atm_pcr_goal(&(vcc->qos.txtp)); | |
1367 | tcra = tcr >= 0 ? tcr : -tcr; | |
1368 | ||
1369 | PRINTK("nicstar%d: target cell rate = %d.\n", card->index, | |
1370 | vcc->qos.txtp.max_pcr); | |
1371 | ||
1372 | tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES; | |
1373 | modl = tmpl % card->max_pcr; | |
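/* n is the number of TST slots this VC needs: the requested rate as a
   fraction of the link rate, scaled to NS_TST_NUM_ENTRIES slots (e.g. a VC
   asking for half of max_pcr gets half of the table entries), rounded up
   below when the rate was a firm request (tcr > 0). */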
1374 | ||
1375 | n = (int)(tmpl / card->max_pcr); | |
1376 | if (tcr > 0) | |
1377 | { | |
1378 | if (modl > 0) n++; | |
1379 | } | |
1380 | else if (tcr == 0) | |
1381 | { | |
1382 | if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0) | |
1383 | { | |
1384 | PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index); | |
1385 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | |
1386 | clear_bit(ATM_VF_ADDR,&vcc->flags); | |
1387 | return -EINVAL; | |
1388 | } | |
1389 | } | |
1390 | ||
1391 | if (n == 0) | |
1392 | { | |
1393 | printk("nicstar%d: selected bandwidth < granularity.\n", card->index); | |
1394 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | |
1395 | clear_bit(ATM_VF_ADDR,&vcc->flags); | |
1396 | return -EINVAL; | |
1397 | } | |
1398 | ||
1399 | if (n > (card->tst_free_entries - NS_TST_RESERVED)) | |
1400 | { | |
1401 | PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index); | |
1402 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | |
1403 | clear_bit(ATM_VF_ADDR,&vcc->flags); | |
1404 | return -EINVAL; | |
1405 | } | |
1406 | else | |
1407 | card->tst_free_entries -= n; | |
1408 | ||
1409 | XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n); | |
1410 | for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) | |
1411 | { | |
1412 | if (card->scd2vc[frscdi] == NULL) | |
1413 | { | |
1414 | card->scd2vc[frscdi] = vc; | |
1415 | break; | |
1416 | } | |
1417 | } | |
1418 | if (frscdi == NS_FRSCD_NUM) | |
1419 | { | |
1420 | PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index); | |
1421 | card->tst_free_entries += n; | |
1422 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | |
1423 | clear_bit(ATM_VF_ADDR,&vcc->flags); | |
1424 | return -EBUSY; | |
1425 | } | |
1426 | ||
1427 | vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; | |
1428 | ||
1429 | scq = get_scq(CBR_SCQSIZE, vc->cbr_scd); | |
1430 | if (scq == NULL) |
1431 | { |
1432 | PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index); | |
1433 | card->scd2vc[frscdi] = NULL; | |
1434 | card->tst_free_entries += n; | |
1435 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | |
1436 | clear_bit(ATM_VF_ADDR,&vcc->flags); | |
1437 | return -ENOMEM; | |
1438 | } | |
1439 | vc->scq = scq; | |
1440 | u32d[0] = (u32) virt_to_bus(scq->base); | |
1441 | u32d[1] = (u32) 0x00000000; | |
1442 | u32d[2] = (u32) 0xffffffff; | |
1443 | u32d[3] = (u32) 0x00000000; | |
1444 | ns_write_sram(card, vc->cbr_scd, u32d, 4); | |
1445 | ||
1446 | fill_tst(card, n, vc); | |
1447 | } | |
1448 | else if (vcc->qos.txtp.traffic_class == ATM_UBR) | |
1449 | { | |
1450 | vc->cbr_scd = 0x00000000; | |
1451 | vc->scq = card->scq0; | |
1452 | } | |
1453 | ||
1454 | if (vcc->qos.txtp.traffic_class != ATM_NONE) | |
1455 | { | |
1456 | vc->tx = 1; | |
1457 | vc->tx_vcc = vcc; | |
1458 | vc->tbd_count = 0; | |
1459 | } | |
1460 | if (vcc->qos.rxtp.traffic_class != ATM_NONE) | |
1461 | { | |
1462 | u32 status; | |
1463 | ||
1464 | vc->rx = 1; | |
1465 | vc->rx_vcc = vcc; | |
1466 | vc->rx_iov = NULL; | |
1467 | ||
1468 | /* Open the connection in hardware */ | |
1469 | if (vcc->qos.aal == ATM_AAL5) | |
1470 | status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; | |
1471 | else /* vcc->qos.aal == ATM_AAL0 */ | |
1472 | status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; | |
1473 | #ifdef RCQ_SUPPORT | |
1474 | status |= NS_RCTE_RAWCELLINTEN; | |
1475 | #endif /* RCQ_SUPPORT */ | |
1476 | ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) * | |
1477 | NS_RCT_ENTRY_SIZE, &status, 1); | |
1478 | } | |
1479 | ||
1480 | } | |
1481 | ||
1482 | set_bit(ATM_VF_READY,&vcc->flags); | |
1483 | return 0; | |
1484 | } | |
1485 | ||
1486 | ||
1487 | ||
1488 | static void ns_close(struct atm_vcc *vcc) | |
1489 | { | |
1490 | vc_map *vc; | |
1491 | ns_dev *card; | |
1492 | u32 data; | |
1493 | int i; | |
1494 | ||
1495 | vc = vcc->dev_data; | |
1496 | card = vcc->dev->dev_data; | |
1497 | PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, | |
1498 | (int) vcc->vpi, vcc->vci); | |
1499 | ||
1500 | clear_bit(ATM_VF_READY,&vcc->flags); | |
1501 | ||
1502 | if (vcc->qos.rxtp.traffic_class != ATM_NONE) | |
1503 | { | |
1504 | u32 addr; | |
1505 | unsigned long flags; | |
1506 | ||
1507 | addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; | |
1508 | spin_lock_irqsave(&card->res_lock, flags); |
1509 | while(CMD_BUSY(card)); |
1510 | writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD); | |
1511 | spin_unlock_irqrestore(&card->res_lock, flags); | |
1512 | ||
1513 | vc->rx = 0; | |
1514 | if (vc->rx_iov != NULL) | |
1515 | { | |
1516 | struct sk_buff *iovb; | |
1517 | u32 stat; | |
1518 | ||
1519 | stat = readl(card->membase + STAT); | |
1520 | card->sbfqc = ns_stat_sfbqc_get(stat); | |
1521 | card->lbfqc = ns_stat_lfbqc_get(stat); | |
1522 | ||
1523 | PRINTK("nicstar%d: closing a VC with pending rx buffers.\n", | |
1524 | card->index); | |
1525 | iovb = vc->rx_iov; | |
1526 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, | |
1527 | NS_SKB(iovb)->iovcnt); | |
1528 | NS_SKB(iovb)->iovcnt = 0; | |
1529 | NS_SKB(iovb)->vcc = NULL; | |
1530 | spin_lock_irqsave(&card->int_lock, flags); |
1531 | recycle_iov_buf(card, iovb); |
1532 | spin_unlock_irqrestore(&card->int_lock, flags); | |
1533 | vc->rx_iov = NULL; | |
1534 | } | |
1535 | } | |
1536 | ||
1537 | if (vcc->qos.txtp.traffic_class != ATM_NONE) | |
1538 | { | |
1539 | vc->tx = 0; | |
1540 | } | |
1541 | ||
1542 | if (vcc->qos.txtp.traffic_class == ATM_CBR) | |
1543 | { | |
1544 | unsigned long flags; | |
1545 | ns_scqe *scqep; | |
1546 | scq_info *scq; | |
1547 | ||
1548 | scq = vc->scq; | |
1549 | ||
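/* Drain the CBR SCQ: keep queueing a TSR (transmit status request) behind the
   last real entry and sleeping until the tail has caught up with everything
   that was queued, i.e. until all pending cells have gone out. */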
1550 | for (;;) | |
1551 | { | |
1552 | spin_lock_irqsave(&scq->lock, flags); |
1553 | scqep = scq->next; |
1554 | if (scqep == scq->base) | |
1555 | scqep = scq->last; | |
1556 | else | |
1557 | scqep--; | |
1558 | if (scqep == scq->tail) | |
1559 | { | |
1560 | spin_unlock_irqrestore(&scq->lock, flags); | |
1561 | break; | |
1562 | } | |
1563 | /* If the last entry is not a TSR, place one in the SCQ in order to | |
1564 | be able to completely drain it and then close. */ | |
1565 | if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) | |
1566 | { | |
1567 | ns_scqe tsr; | |
1568 | u32 scdi, scqi; | |
1569 | u32 data; | |
1570 | int index; | |
1571 | ||
1572 | tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); | |
1573 | scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; | |
1574 | scqi = scq->next - scq->base; | |
1575 | tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); | |
1576 | tsr.word_3 = 0x00000000; | |
1577 | tsr.word_4 = 0x00000000; | |
1578 | *scq->next = tsr; | |
1579 | index = (int) scqi; | |
1580 | scq->skb[index] = NULL; | |
1581 | if (scq->next == scq->last) | |
1582 | scq->next = scq->base; | |
1583 | else | |
1584 | scq->next++; | |
1585 | data = (u32) virt_to_bus(scq->next); | |
1586 | ns_write_sram(card, scq->scd, &data, 1); | |
1587 | } | |
1588 | spin_unlock_irqrestore(&scq->lock, flags); | |
1589 | schedule(); | |
1590 | } | |
1591 | ||
1592 | /* Free all TST entries */ | |
1593 | data = NS_TST_OPCODE_VARIABLE; | |
1594 | for (i = 0; i < NS_TST_NUM_ENTRIES; i++) | |
1595 | { | |
1596 | if (card->tste2vc[i] == vc) | |
1597 | { | |
1598 | ns_write_sram(card, card->tst_addr + i, &data, 1); | |
1599 | card->tste2vc[i] = NULL; | |
1600 | card->tst_free_entries++; | |
1601 | } | |
1602 | } | |
1603 | ||
1604 | card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; | |
1605 | free_scq(vc->scq, vcc); | |
1606 | } | |
1607 | ||
1608 | /* remove all references to vcc before deleting it */ | |
1609 | if (vcc->qos.txtp.traffic_class != ATM_NONE) | |
1610 | { | |
1611 | unsigned long flags; | |
1612 | scq_info *scq = card->scq0; | |
1613 | ||
1614 | spin_lock_irqsave(&scq->lock, flags); |
1615 | |
1616 | for(i = 0; i < scq->num_entries; i++) { | |
1617 | if(scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { | |
1618 | ATM_SKB(scq->skb[i])->vcc = NULL; | |
1619 | atm_return(vcc, scq->skb[i]->truesize); | |
1620 | PRINTK("nicstar: deleted pending vcc mapping\n"); | |
1621 | } | |
1622 | } | |
1623 | ||
1624 | spin_unlock_irqrestore(&scq->lock, flags); | |
1625 | } | |
1626 | ||
1627 | vcc->dev_data = NULL; | |
1628 | clear_bit(ATM_VF_PARTIAL,&vcc->flags); | |
1629 | clear_bit(ATM_VF_ADDR,&vcc->flags); | |
1630 | ||
1631 | #ifdef RX_DEBUG | |
1632 | { | |
1633 | u32 stat, cfg; | |
1634 | stat = readl(card->membase + STAT); | |
1635 | cfg = readl(card->membase + CFG); | |
1636 | printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); | |
1637 | printk("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n", | |
1638 | (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last, | |
1639 | readl(card->membase + TSQT)); | |
1640 | printk("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n", | |
1641 | (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last, | |
1642 | readl(card->membase + RSQT)); | |
1643 | printk("Empty free buffer queue interrupt %s \n", | |
1644 | card->efbie ? "enabled" : "disabled"); | |
1645 | printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", | |
1646 | ns_stat_sfbqc_get(stat), card->sbpool.count, | |
1647 | ns_stat_lfbqc_get(stat), card->lbpool.count); | |
1648 | printk("hbpool.count = %d iovpool.count = %d \n", | |
1649 | card->hbpool.count, card->iovpool.count); | |
1650 | } | |
1651 | #endif /* RX_DEBUG */ | |
1652 | } | |
1653 | ||
1654 | ||
1655 | ||
1656 | static void fill_tst(ns_dev *card, int n, vc_map *vc) | |
1657 | { | |
1658 | u32 new_tst; | |
1659 | unsigned long cl; | |
1660 | int e, r; | |
1661 | u32 data; | |
1662 | ||
1663 | /* It would be very complicated to keep the two TSTs synchronized while | |
1664 | assuring that writes are only made to the inactive TST. So, for now I | |
1665 | will use only one TST. If problems occur, I will change this again */ | |
1666 | ||
1667 | new_tst = card->tst_addr; | |
1668 | ||
1669 | /* Fill procedure */ | |
1670 | ||
1671 | for (e = 0; e < NS_TST_NUM_ENTRIES; e++) | |
1672 | { | |
1673 | if (card->tste2vc[e] == NULL) | |
1674 | break; | |
1675 | } | |
1676 | if (e == NS_TST_NUM_ENTRIES) { | |
1677 | printk("nicstar%d: No free TST entries found. \n", card->index); | |
1678 | return; | |
1679 | } | |
1680 | ||
1681 | r = n; | |
1682 | cl = NS_TST_NUM_ENTRIES; | |
1683 | data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); | |
1684 | ||
1685 | while (r > 0) | |
1686 | { | |
1687 | if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) | |
1688 | { | |
1689 | card->tste2vc[e] = vc; | |
1690 | ns_write_sram(card, new_tst + e, &data, 1); | |
1691 | cl -= NS_TST_NUM_ENTRIES; | |
1692 | r--; | |
1693 | } | |
1694 | ||
1695 | if (++e == NS_TST_NUM_ENTRIES) { | |
1696 | e = 0; | |
1697 | } | |
1698 | cl += n; | |
1699 | } | |
1700 | ||
1701 | /* End of fill procedure */ | |
1702 | ||
1703 | data = ns_tste_make(NS_TST_OPCODE_END, new_tst); | |
1704 | ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); | |
1705 | ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); | |
1706 | card->tst_addr = new_tst; | |
1707 | } | |
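/*
 * The fill loop above spreads the n CBR entries for this VC over the TST
 * slots with a simple error accumulator: cl gains n per slot visited and
 * pays back NS_TST_NUM_ENTRIES per slot taken, so the cell opportunities
 * come out as evenly spaced as integer arithmetic allows.  The stand-alone
 * sketch below models just that spacing rule with made-up sizes (table
 * shrunk to 8 slots, n = 3, every slot assumed free); it is an
 * illustration, not driver code.
 */
#if 0
#include <stdio.h>

static void tst_spacing_demo(int num_entries, int n)
{
	int e = 0, r = n;
	long cl = num_entries;	/* primed so the first free slot is taken */

	while (r > 0) {
		if (cl >= num_entries) {	/* same test as fill_tst() */
			printf("CBR opportunity in slot %d\n", e);
			cl -= num_entries;
			r--;
		}
		if (++e == num_entries)
			e = 0;
		cl += n;
	}
}

/* tst_spacing_demo(8, 3) prints slots 0, 3 and 6. */
#endif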
1708 | ||
1709 | ||
1710 | ||
1711 | static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) | |
1712 | { | |
1713 | ns_dev *card; | |
1714 | vc_map *vc; | |
1715 | scq_info *scq; | |
1716 | unsigned long buflen; | |
1717 | ns_scqe scqe; | |
1718 | u32 flags; /* TBD flags, not CPU flags */ | |
1719 | ||
1720 | card = vcc->dev->dev_data; | |
1721 | TXPRINTK("nicstar%d: ns_send() called.\n", card->index); | |
1722 | if ((vc = (vc_map *) vcc->dev_data) == NULL) | |
1723 | { | |
1724 | printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); | |
1725 | atomic_inc(&vcc->stats->tx_err); | |
1726 | dev_kfree_skb_any(skb); | |
1727 | return -EINVAL; | |
1728 | } | |
1729 | ||
1730 | if (!vc->tx) | |
1731 | { | |
1732 | printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); | |
1733 | atomic_inc(&vcc->stats->tx_err); | |
1734 | dev_kfree_skb_any(skb); | |
1735 | return -EINVAL; | |
1736 | } | |
1737 | ||
1738 | if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) | |
1739 | { | |
1740 | printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); | |
1741 | atomic_inc(&vcc->stats->tx_err); | |
1742 | dev_kfree_skb_any(skb); | |
1743 | return -EINVAL; | |
1744 | } | |
1745 | ||
1746 | if (skb_shinfo(skb)->nr_frags != 0) | |
1747 | { | |
1748 | printk("nicstar%d: No scatter-gather yet.\n", card->index); | |
1749 | atomic_inc(&vcc->stats->tx_err); | |
1750 | dev_kfree_skb_any(skb); | |
1751 | return -EINVAL; | |
1752 | } | |
1753 | ||
1754 | ATM_SKB(skb)->vcc = vcc; | |
1755 | ||
1756 | if (vcc->qos.aal == ATM_AAL5) | |
1757 | { | |
1758 | buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ | |
1759 | flags = NS_TBD_AAL5; | |
1760 | scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data)); | |
1761 | scqe.word_3 = cpu_to_le32((u32) skb->len); | |
1762 | scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, | |
1763 | ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 1 : 0); | |
1764 | flags |= NS_TBD_EOPDU; | |
1765 | } | |
1766 | else /* (vcc->qos.aal == ATM_AAL0) */ | |
1767 | { | |
1768 | buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ | |
1769 | flags = NS_TBD_AAL0; | |
1770 | scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER); | |
1771 | scqe.word_3 = cpu_to_le32(0x00000000); | |
1772 | if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ | |
1773 | flags |= NS_TBD_EOPDU; | |
1774 | scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); | |
1775 | /* Force the VPI/VCI to be the same as in the VCC struct */
1776 | scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT | | |
1777 | ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) & | |
1778 | NS_TBD_VC_MASK); | |
1779 | } | |
1780 | ||
1781 | if (vcc->qos.txtp.traffic_class == ATM_CBR) | |
1782 | { | |
1783 | scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); | |
1784 | scq = ((vc_map *) vcc->dev_data)->scq; | |
1785 | } | |
1786 | else | |
1787 | { | |
1788 | scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); | |
1789 | scq = card->scq0; | |
1790 | } | |
1791 | ||
1792 | if (push_scqe(card, vc, scq, &scqe, skb) != 0) | |
1793 | { | |
1794 | atomic_inc(&vcc->stats->tx_err); | |
1795 | dev_kfree_skb_any(skb); | |
1796 | return -EIO; | |
1797 | } | |
1798 | atomic_inc(&vcc->stats->tx); | |
1799 | ||
1800 | return 0; | |
1801 | } | |
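/*
 * A worked look at the AAL5 buffer length computed in ns_send() above: the
 * PDU plus the 8-byte AAL5 trailer is rounded up to whole 48-byte cell
 * payloads.  The numbers below are illustrative only and can be checked by
 * compiling the sketch on its own.
 *
 *   skb->len  40 -> ( 40 + 47 + 8) / 48 * 48 =  48  (one cell)
 *   skb->len  41 -> ( 41 + 47 + 8) / 48 * 48 =  96  (trailer spills, two cells)
 *   skb->len 100 -> (100 + 47 + 8) / 48 * 48 = 144  (three cells)
 */
#if 0
#include <stdio.h>

static unsigned long aal5_buflen(unsigned long len)
{
	return (len + 47 + 8) / 48 * 48;	/* same formula as above */
}

int main(void)
{
	printf("%lu %lu %lu\n", aal5_buflen(40), aal5_buflen(41), aal5_buflen(100));
	return 0;
}
#endif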
1802 | ||
1803 | ||
1804 | ||
1805 | static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd, | |
1806 | struct sk_buff *skb) | |
1807 | { | |
1808 | unsigned long flags; | |
1809 | ns_scqe tsr; | |
1810 | u32 scdi, scqi; | |
1811 | int scq_is_vbr; | |
1812 | u32 data; | |
1813 | int index; | |
1814 | ||
36ef4080 | 1815 | spin_lock_irqsave(&scq->lock, flags); |
1da177e4 LT |
1816 | while (scq->tail == scq->next) |
1817 | { | |
1818 | if (in_interrupt()) { | |
1819 | spin_unlock_irqrestore(&scq->lock, flags); | |
1820 | printk("nicstar%d: Error pushing TBD.\n", card->index); | |
1821 | return 1; | |
1822 | } | |
1823 | ||
1824 | scq->full = 1; | |
1825 | spin_unlock_irqrestore(&scq->lock, flags); | |
1826 | interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); | |
36ef4080 | 1827 | spin_lock_irqsave(&scq->lock, flags); |
1da177e4 LT |
1828 | |
1829 | if (scq->full) { | |
1830 | spin_unlock_irqrestore(&scq->lock, flags); | |
1831 | printk("nicstar%d: Timeout pushing TBD.\n", card->index); | |
1832 | return 1; | |
1833 | } | |
1834 | } | |
1835 | *scq->next = *tbd; | |
1836 | index = (int) (scq->next - scq->base); | |
1837 | scq->skb[index] = skb; | |
1838 | XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n", | |
1839 | card->index, (u32) skb, index); | |
1840 | XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", | |
1841 | card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), | |
1842 | le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), | |
1843 | (u32) scq->next); | |
1844 | if (scq->next == scq->last) | |
1845 | scq->next = scq->base; | |
1846 | else | |
1847 | scq->next++; | |
1848 | ||
1849 | vc->tbd_count++; | |
1850 | if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) | |
1851 | { | |
1852 | scq->tbd_count++; | |
1853 | scq_is_vbr = 1; | |
1854 | } | |
1855 | else | |
1856 | scq_is_vbr = 0; | |
1857 | ||
1858 | if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ) | |
1859 | { | |
1860 | int has_run = 0; | |
1861 | ||
1862 | while (scq->tail == scq->next) | |
1863 | { | |
1864 | if (in_interrupt()) { | |
1865 | data = (u32) virt_to_bus(scq->next); | |
1866 | ns_write_sram(card, scq->scd, &data, 1); | |
1867 | spin_unlock_irqrestore(&scq->lock, flags); | |
1868 | printk("nicstar%d: Error pushing TSR.\n", card->index); | |
1869 | return 0; | |
1870 | } | |
1871 | ||
1872 | scq->full = 1; | |
1873 | if (has_run++) break; | |
1874 | spin_unlock_irqrestore(&scq->lock, flags); | |
1875 | interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); | |
36ef4080 | 1876 | spin_lock_irqsave(&scq->lock, flags); |
1da177e4 LT |
1877 | } |
1878 | ||
1879 | if (!scq->full) | |
1880 | { | |
1881 | tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); | |
1882 | if (scq_is_vbr) | |
1883 | scdi = NS_TSR_SCDISVBR; | |
1884 | else | |
1885 | scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; | |
1886 | scqi = scq->next - scq->base; | |
1887 | tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); | |
1888 | tsr.word_3 = 0x00000000; | |
1889 | tsr.word_4 = 0x00000000; | |
1890 | ||
1891 | *scq->next = tsr; | |
1892 | index = (int) scqi; | |
1893 | scq->skb[index] = NULL; | |
1894 | XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", | |
1895 | card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2), | |
1896 | le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4), | |
1897 | (u32) scq->next); | |
1898 | if (scq->next == scq->last) | |
1899 | scq->next = scq->base; | |
1900 | else | |
1901 | scq->next++; | |
1902 | vc->tbd_count = 0; | |
1903 | scq->tbd_count = 0; | |
1904 | } | |
1905 | else | |
1906 | PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index); | |
1907 | } | |
1908 | data = (u32) virt_to_bus(scq->next); | |
1909 | ns_write_sram(card, scq->scd, &data, 1); | |
1910 | ||
1911 | spin_unlock_irqrestore(&scq->lock, flags); | |
1912 | ||
1913 | return 0; | |
1914 | } | |
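/*
 * Minimal model of the SCQ ring arithmetic push_scqe() relies on: next is
 * where the driver writes, tail is the last position the card has reported
 * back, the queue counts as full when the two meet, and both indices wrap
 * from the last entry back to the base.  Ring size and names below are
 * made up for the sketch; it is not meant to mirror the hardware exactly.
 */
#if 0
#define DEMO_SCQ_ENTRIES 8

struct demo_scq {
	int next;	/* producer index, like scq->next - scq->base */
	int tail;	/* consumer index, like scq->tail - scq->base */
};

static int demo_scq_full(const struct demo_scq *q)
{
	return q->tail == q->next;	/* same test as the while loop above */
}

static void demo_scq_advance(struct demo_scq *q)
{
	if (++q->next == DEMO_SCQ_ENTRIES)	/* wrap, like scq->last -> scq->base */
		q->next = 0;
}
#endif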
1915 | ||
1916 | ||
1917 | ||
1918 | static void process_tsq(ns_dev *card) | |
1919 | { | |
1920 | u32 scdi; | |
1921 | scq_info *scq; | |
1922 | ns_tsi *previous = NULL, *one_ahead, *two_ahead; | |
1923 | int serviced_entries; /* flag indicating at least one entry was serviced */
1924 | ||
1925 | serviced_entries = 0; | |
1926 | ||
1927 | if (card->tsq.next == card->tsq.last) | |
1928 | one_ahead = card->tsq.base; | |
1929 | else | |
1930 | one_ahead = card->tsq.next + 1; | |
1931 | ||
1932 | if (one_ahead == card->tsq.last) | |
1933 | two_ahead = card->tsq.base; | |
1934 | else | |
1935 | two_ahead = one_ahead + 1; | |
1936 | ||
1937 | while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || | |
1938 | !ns_tsi_isempty(two_ahead)) | |
1939 | /* At most two entries may appear empty, as stated in the 77201 errata */
1940 | { | |
1941 | serviced_entries = 1; | |
1942 | ||
1943 | /* Skip the one or two possible empty entries */ | |
1944 | while (ns_tsi_isempty(card->tsq.next)) { | |
1945 | if (card->tsq.next == card->tsq.last) | |
1946 | card->tsq.next = card->tsq.base; | |
1947 | else | |
1948 | card->tsq.next++; | |
1949 | } | |
1950 | ||
1951 | if (!ns_tsi_tmrof(card->tsq.next)) | |
1952 | { | |
1953 | scdi = ns_tsi_getscdindex(card->tsq.next); | |
1954 | if (scdi == NS_TSI_SCDISVBR) | |
1955 | scq = card->scq0; | |
1956 | else | |
1957 | { | |
1958 | if (card->scd2vc[scdi] == NULL) | |
1959 | { | |
1960 | printk("nicstar%d: could not find VC from SCD index.\n", | |
1961 | card->index); | |
1962 | ns_tsi_init(card->tsq.next); | |
1963 | return; | |
1964 | } | |
1965 | scq = card->scd2vc[scdi]->scq; | |
1966 | } | |
1967 | drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); | |
1968 | scq->full = 0; | |
1969 | wake_up_interruptible(&(scq->scqfull_waitq)); | |
1970 | } | |
1971 | ||
1972 | ns_tsi_init(card->tsq.next); | |
1973 | previous = card->tsq.next; | |
1974 | if (card->tsq.next == card->tsq.last) | |
1975 | card->tsq.next = card->tsq.base; | |
1976 | else | |
1977 | card->tsq.next++; | |
1978 | ||
1979 | if (card->tsq.next == card->tsq.last) | |
1980 | one_ahead = card->tsq.base; | |
1981 | else | |
1982 | one_ahead = card->tsq.next + 1; | |
1983 | ||
1984 | if (one_ahead == card->tsq.last) | |
1985 | two_ahead = card->tsq.base; | |
1986 | else | |
1987 | two_ahead = one_ahead + 1; | |
1988 | } | |
1989 | ||
1990 | if (serviced_entries) { | |
1991 | writel((((u32) previous) - ((u32) card->tsq.base)), | |
1992 | card->membase + TSQH); | |
1993 | } | |
1994 | } | |
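/*
 * The one_ahead/two_ahead peeking above exists because, per the 77201
 * errata cited in the comment, up to two TSQ entries may still read back
 * as empty even though later entries are valid.  The sketch below only
 * models the wrap-around index arithmetic for "one ahead" and "two
 * ahead"; the ring size is invented for the example.
 */
#if 0
#define DEMO_TSQ_ENTRIES 16

static int demo_tsq_ahead(int idx, int by)
{
	return (idx + by) % DEMO_TSQ_ENTRIES;	/* e.g. demo_tsq_ahead(15, 2) wraps to 1 */
}
#endif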
1995 | ||
1996 | ||
1997 | ||
1998 | static void drain_scq(ns_dev *card, scq_info *scq, int pos) | |
1999 | { | |
2000 | struct atm_vcc *vcc; | |
2001 | struct sk_buff *skb; | |
2002 | int i; | |
2003 | unsigned long flags; | |
2004 | ||
2005 | XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n", | |
2006 | card->index, (u32) scq, pos); | |
2007 | if (pos >= scq->num_entries) | |
2008 | { | |
2009 | printk("nicstar%d: Bad index on drain_scq().\n", card->index); | |
2010 | return; | |
2011 | } | |
2012 | ||
36ef4080 | 2013 | spin_lock_irqsave(&scq->lock, flags); |
1da177e4 LT |
2014 | i = (int) (scq->tail - scq->base); |
2015 | if (++i == scq->num_entries) | |
2016 | i = 0; | |
2017 | while (i != pos) | |
2018 | { | |
2019 | skb = scq->skb[i]; | |
2020 | XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n", | |
2021 | card->index, (u32) skb, i); | |
2022 | if (skb != NULL) | |
2023 | { | |
2024 | vcc = ATM_SKB(skb)->vcc; | |
2025 | if (vcc && vcc->pop != NULL) { | |
2026 | vcc->pop(vcc, skb); | |
2027 | } else { | |
2028 | dev_kfree_skb_irq(skb); | |
2029 | } | |
2030 | scq->skb[i] = NULL; | |
2031 | } | |
2032 | if (++i == scq->num_entries) | |
2033 | i = 0; | |
2034 | } | |
2035 | scq->tail = scq->base + pos; | |
2036 | spin_unlock_irqrestore(&scq->lock, flags); | |
2037 | } | |
2038 | ||
2039 | ||
2040 | ||
2041 | static void process_rsq(ns_dev *card) | |
2042 | { | |
2043 | ns_rsqe *previous; | |
2044 | ||
2045 | if (!ns_rsqe_valid(card->rsq.next)) | |
2046 | return; | |
2087ff3e | 2047 | do { |
1da177e4 LT |
2048 | dequeue_rx(card, card->rsq.next); |
2049 | ns_rsqe_init(card->rsq.next); | |
2050 | previous = card->rsq.next; | |
2051 | if (card->rsq.next == card->rsq.last) | |
2052 | card->rsq.next = card->rsq.base; | |
2053 | else | |
2054 | card->rsq.next++; | |
2087ff3e | 2055 | } while (ns_rsqe_valid(card->rsq.next)); |
1da177e4 LT |
2056 | writel((((u32) previous) - ((u32) card->rsq.base)), |
2057 | card->membase + RSQH); | |
2058 | } | |
2059 | ||
2060 | ||
2061 | ||
2062 | static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) | |
2063 | { | |
2064 | u32 vpi, vci; | |
2065 | vc_map *vc; | |
2066 | struct sk_buff *iovb; | |
2067 | struct iovec *iov; | |
2068 | struct atm_vcc *vcc; | |
2069 | struct sk_buff *skb; | |
2070 | unsigned short aal5_len; | |
2071 | int len; | |
2072 | u32 stat; | |
2073 | ||
2074 | stat = readl(card->membase + STAT); | |
2075 | card->sbfqc = ns_stat_sfbqc_get(stat); | |
2076 | card->lbfqc = ns_stat_lfbqc_get(stat); | |
2077 | ||
2078 | skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle); | |
2079 | vpi = ns_rsqe_vpi(rsqe); | |
2080 | vci = ns_rsqe_vci(rsqe); | |
2081 | if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) | |
2082 | { | |
2083 | printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", | |
2084 | card->index, vpi, vci); | |
2085 | recycle_rx_buf(card, skb); | |
2086 | return; | |
2087 | } | |
2088 | ||
2089 | vc = &(card->vcmap[vpi << card->vcibits | vci]); | |
2090 | if (!vc->rx) | |
2091 | { | |
2092 | RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", | |
2093 | card->index, vpi, vci); | |
2094 | recycle_rx_buf(card, skb); | |
2095 | return; | |
2096 | } | |
2097 | ||
2098 | vcc = vc->rx_vcc; | |
2099 | ||
2100 | if (vcc->qos.aal == ATM_AAL0) | |
2101 | { | |
2102 | struct sk_buff *sb; | |
2103 | unsigned char *cell; | |
2104 | int i; | |
2105 | ||
2106 | cell = skb->data; | |
2107 | for (i = ns_rsqe_cellcount(rsqe); i; i--) | |
2108 | { | |
2109 | if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) | |
2110 | { | |
2111 | printk("nicstar%d: Can't allocate buffers for aal0.\n", | |
2112 | card->index); | |
2113 | atomic_add(i,&vcc->stats->rx_drop); | |
2114 | break; | |
2115 | } | |
2116 | if (!atm_charge(vcc, sb->truesize)) | |
2117 | { | |
2118 | RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n", | |
2119 | card->index); | |
2120 | atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ | |
2121 | dev_kfree_skb_any(sb); | |
2122 | break; | |
2123 | } | |
2124 | /* Rebuild the header */ | |
2125 | *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | | |
2126 | (ns_rsqe_clp(rsqe) ? 0x00000001 : 0x00000000); | |
2127 | if (i == 1 && ns_rsqe_eopdu(rsqe)) | |
2128 | *((u32 *) sb->data) |= 0x00000002; | |
2129 | skb_put(sb, NS_AAL0_HEADER); | |
27a884dc | 2130 | memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); |
1da177e4 LT |
2131 | skb_put(sb, ATM_CELL_PAYLOAD); |
2132 | ATM_SKB(sb)->vcc = vcc; | |
a61bbcf2 | 2133 | __net_timestamp(sb); |
1da177e4 LT |
2134 | vcc->push(vcc, sb); |
2135 | atomic_inc(&vcc->stats->rx); | |
2136 | cell += ATM_CELL_PAYLOAD; | |
2137 | } | |
2138 | ||
2139 | recycle_rx_buf(card, skb); | |
2140 | return; | |
2141 | } | |
2142 | ||
2143 | /* To reach this point, the AAL layer can only be AAL5 */ | |
2144 | ||
2145 | if ((iovb = vc->rx_iov) == NULL) | |
2146 | { | |
2147 | iovb = skb_dequeue(&(card->iovpool.queue)); | |
2148 | if (iovb == NULL) /* No buffers in the queue */ | |
2149 | { | |
2150 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); | |
2151 | if (iovb == NULL) | |
2152 | { | |
2153 | printk("nicstar%d: Out of iovec buffers.\n", card->index); | |
2154 | atomic_inc(&vcc->stats->rx_drop); | |
2155 | recycle_rx_buf(card, skb); | |
2156 | return; | |
2157 | } | |
8728b834 | 2158 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; |
1da177e4 LT |
2159 | } |
2160 | else | |
2161 | if (--card->iovpool.count < card->iovnr.min) | |
2162 | { | |
2163 | struct sk_buff *new_iovb; | |
2164 | if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) | |
2165 | { | |
8728b834 | 2166 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; |
1da177e4 LT |
2167 | skb_queue_tail(&card->iovpool.queue, new_iovb); |
2168 | card->iovpool.count++; | |
2169 | } | |
2170 | } | |
2171 | vc->rx_iov = iovb; | |
2172 | NS_SKB(iovb)->iovcnt = 0; | |
2173 | iovb->len = 0; | |
27a884dc ACM |
2174 | iovb->data = iovb->head; |
2175 | skb_reset_tail_pointer(iovb); | |
1da177e4 LT |
2176 | NS_SKB(iovb)->vcc = vcc; |
2177 | /* IMPORTANT: a pointer to the sk_buff containing the small or large | |
2178 | buffer is stored as iovec base, NOT a pointer to the | |
2179 | small or large buffer itself. */ | |
2180 | } | |
2181 | else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) | |
2182 | { | |
2183 | printk("nicstar%d: received too big AAL5 SDU.\n", card->index); | |
2184 | atomic_inc(&vcc->stats->rx_err); | |
2185 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); | |
2186 | NS_SKB(iovb)->iovcnt = 0; | |
2187 | iovb->len = 0; | |
27a884dc ACM |
2188 | iovb->data = iovb->head; |
2189 | skb_reset_tail_pointer(iovb); | |
1da177e4 LT |
2190 | NS_SKB(iovb)->vcc = vcc; |
2191 | } | |
2192 | iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++]; | |
2193 | iov->iov_base = (void *) skb; | |
2194 | iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; | |
2195 | iovb->len += iov->iov_len; | |
2196 | ||
2197 | if (NS_SKB(iovb)->iovcnt == 1) | |
2198 | { | |
8728b834 | 2199 | if (NS_SKB_CB(skb)->buf_type != BUF_SM) |
1da177e4 LT |
2200 | { |
2201 | printk("nicstar%d: Expected a small buffer, and this is not one.\n", | |
2202 | card->index); | |
2203 | which_list(card, skb); | |
2204 | atomic_inc(&vcc->stats->rx_err); | |
2205 | recycle_rx_buf(card, skb); | |
2206 | vc->rx_iov = NULL; | |
2207 | recycle_iov_buf(card, iovb); | |
2208 | return; | |
2209 | } | |
2210 | } | |
2211 | else /* NS_SKB(iovb)->iovcnt >= 2 */ | |
2212 | { | |
8728b834 | 2213 | if (NS_SKB_CB(skb)->buf_type != BUF_LG) |
1da177e4 LT |
2214 | { |
2215 | printk("nicstar%d: Expected a large buffer, and this is not one.\n", | |
2216 | card->index); | |
2217 | which_list(card, skb); | |
2218 | atomic_inc(&vcc->stats->rx_err); | |
2219 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, | |
2220 | NS_SKB(iovb)->iovcnt); | |
2221 | vc->rx_iov = NULL; | |
2222 | recycle_iov_buf(card, iovb); | |
2223 | return; | |
2224 | } | |
2225 | } | |
2226 | ||
2227 | if (ns_rsqe_eopdu(rsqe)) | |
2228 | { | |
2229 | /* This works correctly regardless of the endianness of the host */ | |
2230 | unsigned char *L1L2 = (unsigned char *)((u32)skb->data + | |
2231 | iov->iov_len - 6); | |
2232 | aal5_len = L1L2[0] << 8 | L1L2[1]; | |
2233 | len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; | |
2234 | if (ns_rsqe_crcerr(rsqe) || | |
2235 | len + 8 > iovb->len || len + (47 + 8) < iovb->len) | |
2236 | { | |
2237 | printk("nicstar%d: AAL5 CRC error", card->index); | |
2238 | if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) | |
2239 | printk(" - PDU size mismatch.\n"); | |
2240 | else | |
2241 | printk(".\n"); | |
2242 | atomic_inc(&vcc->stats->rx_err); | |
2243 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, | |
2244 | NS_SKB(iovb)->iovcnt); | |
2245 | vc->rx_iov = NULL; | |
2246 | recycle_iov_buf(card, iovb); | |
2247 | return; | |
2248 | } | |
2249 | ||
2250 | /* By this point we (hopefully) have a complete SDU without errors. */ | |
2251 | ||
2252 | if (NS_SKB(iovb)->iovcnt == 1) /* Just a small buffer */ | |
2253 | { | |
2254 | /* skb points to a small buffer */ | |
2255 | if (!atm_charge(vcc, skb->truesize)) | |
2256 | { | |
8728b834 | 2257 | push_rxbufs(card, skb); |
1da177e4 LT |
2258 | atomic_inc(&vcc->stats->rx_drop); |
2259 | } | |
2260 | else | |
2261 | { | |
2262 | skb_put(skb, len); | |
2263 | dequeue_sm_buf(card, skb); | |
2264 | #ifdef NS_USE_DESTRUCTORS | |
2265 | skb->destructor = ns_sb_destructor; | |
2266 | #endif /* NS_USE_DESTRUCTORS */ | |
2267 | ATM_SKB(skb)->vcc = vcc; | |
a61bbcf2 | 2268 | __net_timestamp(skb); |
1da177e4 LT |
2269 | vcc->push(vcc, skb); |
2270 | atomic_inc(&vcc->stats->rx); | |
2271 | } | |
2272 | } | |
2273 | else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */ | |
2274 | { | |
2275 | struct sk_buff *sb; | |
2276 | ||
2277 | sb = (struct sk_buff *) (iov - 1)->iov_base; | |
2278 | /* skb points to a large buffer */ | |
2279 | ||
2280 | if (len <= NS_SMBUFSIZE) | |
2281 | { | |
2282 | if (!atm_charge(vcc, sb->truesize)) | |
2283 | { | |
8728b834 | 2284 | push_rxbufs(card, sb); |
1da177e4 LT |
2285 | atomic_inc(&vcc->stats->rx_drop); |
2286 | } | |
2287 | else | |
2288 | { | |
2289 | skb_put(sb, len); | |
2290 | dequeue_sm_buf(card, sb); | |
2291 | #ifdef NS_USE_DESTRUCTORS | |
2292 | sb->destructor = ns_sb_destructor; | |
2293 | #endif /* NS_USE_DESTRUCTORS */ | |
2294 | ATM_SKB(sb)->vcc = vcc; | |
a61bbcf2 | 2295 | __net_timestamp(sb); |
1da177e4 LT |
2296 | vcc->push(vcc, sb); |
2297 | atomic_inc(&vcc->stats->rx); | |
2298 | } | |
2299 | ||
8728b834 | 2300 | push_rxbufs(card, skb); |
1da177e4 LT |
2301 | |
2302 | } | |
2303 | else /* len > NS_SMBUFSIZE, the usual case */ | |
2304 | { | |
2305 | if (!atm_charge(vcc, skb->truesize)) | |
2306 | { | |
8728b834 | 2307 | push_rxbufs(card, skb); |
1da177e4 LT |
2308 | atomic_inc(&vcc->stats->rx_drop); |
2309 | } | |
2310 | else | |
2311 | { | |
2312 | dequeue_lg_buf(card, skb); | |
2313 | #ifdef NS_USE_DESTRUCTORS | |
2314 | skb->destructor = ns_lb_destructor; | |
2315 | #endif /* NS_USE_DESTRUCTORS */ | |
2316 | skb_push(skb, NS_SMBUFSIZE); | |
d626f62b | 2317 | skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE); |
1da177e4 LT |
2318 | skb_put(skb, len - NS_SMBUFSIZE); |
2319 | ATM_SKB(skb)->vcc = vcc; | |
a61bbcf2 | 2320 | __net_timestamp(skb); |
1da177e4 LT |
2321 | vcc->push(vcc, skb); |
2322 | atomic_inc(&vcc->stats->rx); | |
2323 | } | |
2324 | ||
8728b834 | 2325 | push_rxbufs(card, sb); |
1da177e4 LT |
2326 | |
2327 | } | |
2328 | ||
2329 | } | |
2330 | else /* Must push a huge buffer */ | |
2331 | { | |
2332 | struct sk_buff *hb, *sb, *lb; | |
2333 | int remaining, tocopy; | |
2334 | int j; | |
2335 | ||
2336 | hb = skb_dequeue(&(card->hbpool.queue)); | |
2337 | if (hb == NULL) /* No buffers in the queue */ | |
2338 | { | |
2339 | ||
2340 | hb = dev_alloc_skb(NS_HBUFSIZE); | |
2341 | if (hb == NULL) | |
2342 | { | |
2343 | printk("nicstar%d: Out of huge buffers.\n", card->index); | |
2344 | atomic_inc(&vcc->stats->rx_drop); | |
2345 | recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, | |
2346 | NS_SKB(iovb)->iovcnt); | |
2347 | vc->rx_iov = NULL; | |
2348 | recycle_iov_buf(card, iovb); | |
2349 | return; | |
2350 | } | |
2351 | else if (card->hbpool.count < card->hbnr.min) | |
2352 | { | |
2353 | struct sk_buff *new_hb; | |
2354 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) | |
2355 | { | |
2356 | skb_queue_tail(&card->hbpool.queue, new_hb); | |
2357 | card->hbpool.count++; | |
2358 | } | |
2359 | } | |
8728b834 | 2360 | NS_SKB_CB(hb)->buf_type = BUF_NONE; |
1da177e4 LT |
2361 | } |
2362 | else | |
2363 | if (--card->hbpool.count < card->hbnr.min) | |
2364 | { | |
2365 | struct sk_buff *new_hb; | |
2366 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) | |
2367 | { | |
8728b834 | 2368 | NS_SKB_CB(new_hb)->buf_type = BUF_NONE; |
1da177e4 LT |
2369 | skb_queue_tail(&card->hbpool.queue, new_hb); |
2370 | card->hbpool.count++; | |
2371 | } | |
2372 | if (card->hbpool.count < card->hbnr.min) | |
2373 | { | |
2374 | if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) | |
2375 | { | |
8728b834 | 2376 | NS_SKB_CB(new_hb)->buf_type = BUF_NONE; |
1da177e4 LT |
2377 | skb_queue_tail(&card->hbpool.queue, new_hb); |
2378 | card->hbpool.count++; | |
2379 | } | |
2380 | } | |
2381 | } | |
2382 | ||
2383 | iov = (struct iovec *) iovb->data; | |
2384 | ||
2385 | if (!atm_charge(vcc, hb->truesize)) | |
2386 | { | |
2387 | recycle_iovec_rx_bufs(card, iov, NS_SKB(iovb)->iovcnt); | |
2388 | if (card->hbpool.count < card->hbnr.max) | |
2389 | { | |
2390 | skb_queue_tail(&card->hbpool.queue, hb); | |
2391 | card->hbpool.count++; | |
2392 | } | |
2393 | else | |
2394 | dev_kfree_skb_any(hb); | |
2395 | atomic_inc(&vcc->stats->rx_drop); | |
2396 | } | |
2397 | else | |
2398 | { | |
2399 | /* Copy the small buffer to the huge buffer */ | |
2400 | sb = (struct sk_buff *) iov->iov_base; | |
d626f62b | 2401 | skb_copy_from_linear_data(sb, hb->data, iov->iov_len); |
1da177e4 LT |
2402 | skb_put(hb, iov->iov_len); |
2403 | remaining = len - iov->iov_len; | |
2404 | iov++; | |
2405 | /* Free the small buffer */ | |
8728b834 | 2406 | push_rxbufs(card, sb); |
1da177e4 LT |
2407 | |
2408 | /* Copy all large buffers to the huge buffer and free them */ | |
2409 | for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) | |
2410 | { | |
2411 | lb = (struct sk_buff *) iov->iov_base; | |
2412 | tocopy = min_t(int, remaining, iov->iov_len); | |
d626f62b | 2413 | skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy); |
1da177e4 LT |
2414 | skb_put(hb, tocopy); |
2415 | iov++; | |
2416 | remaining -= tocopy; | |
8728b834 | 2417 | push_rxbufs(card, lb); |
1da177e4 LT |
2418 | } |
2419 | #ifdef EXTRA_DEBUG | |
2420 | if (remaining != 0 || hb->len != len) | |
2421 | printk("nicstar%d: Huge buffer len mismatch.\n", card->index); | |
2422 | #endif /* EXTRA_DEBUG */ | |
2423 | ATM_SKB(hb)->vcc = vcc; | |
2424 | #ifdef NS_USE_DESTRUCTORS | |
2425 | hb->destructor = ns_hb_destructor; | |
2426 | #endif /* NS_USE_DESTRUCTORS */ | |
a61bbcf2 | 2427 | __net_timestamp(hb); |
1da177e4 LT |
2428 | vcc->push(vcc, hb); |
2429 | atomic_inc(&vcc->stats->rx); | |
2430 | } | |
2431 | } | |
2432 | ||
2433 | vc->rx_iov = NULL; | |
2434 | recycle_iov_buf(card, iovb); | |
2435 | } | |
2436 | ||
2437 | } | |
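/*
 * How the PDU length is recovered at the end-of-PDU branch above: the AAL5
 * trailer occupies the last 8 bytes of the final cell payload, with the
 * 16-bit length field 6 bytes from the end in network byte order, which is
 * why reading the two bytes individually works on any host.  The
 * stand-alone sketch below fakes a final 48-byte cell with made-up
 * contents just to show where the length is picked up.
 */
#if 0
#include <stdio.h>

static unsigned int demo_aal5_pdu_len(const unsigned char *cell_end)
{
	const unsigned char *L1L2 = cell_end - 6;
	unsigned int len = L1L2[0] << 8 | L1L2[1];

	return len == 0 ? 0x10000 : len;	/* 0 treated as 65536, as above */
}

int main(void)
{
	unsigned char last_cell[48] = { 0 };

	last_cell[42] = 0x01;	/* length, high byte */
	last_cell[43] = 0x2c;	/* length, low byte: 0x012c = 300 bytes */
	printf("PDU length %u\n", demo_aal5_pdu_len(last_cell + 48));
	return 0;
}
#endif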
2438 | ||
2439 | ||
2440 | ||
2441 | #ifdef NS_USE_DESTRUCTORS | |
2442 | ||
2443 | static void ns_sb_destructor(struct sk_buff *sb) | |
2444 | { | |
2445 | ns_dev *card; | |
2446 | u32 stat; | |
2447 | ||
2448 | card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data; | |
2449 | stat = readl(card->membase + STAT); | |
2450 | card->sbfqc = ns_stat_sfbqc_get(stat); | |
2451 | card->lbfqc = ns_stat_lfbqc_get(stat); | |
2452 | ||
2453 | do | |
2454 | { | |
2455 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | |
2456 | if (sb == NULL) | |
2457 | break; | |
8728b834 | 2458 | NS_SKB_CB(sb)->buf_type = BUF_SM; |
1da177e4 LT |
2459 | skb_queue_tail(&card->sbpool.queue, sb); |
2460 | skb_reserve(sb, NS_AAL0_HEADER); | |
8728b834 | 2461 | push_rxbufs(card, sb); |
1da177e4 LT |
2462 | } while (card->sbfqc < card->sbnr.min); |
2463 | } | |
2464 | ||
2465 | ||
2466 | ||
2467 | static void ns_lb_destructor(struct sk_buff *lb) | |
2468 | { | |
2469 | ns_dev *card; | |
2470 | u32 stat; | |
2471 | ||
2472 | card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data; | |
2473 | stat = readl(card->membase + STAT); | |
2474 | card->sbfqc = ns_stat_sfbqc_get(stat); | |
2475 | card->lbfqc = ns_stat_lfbqc_get(stat); | |
2476 | ||
2477 | do | |
2478 | { | |
2479 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); | |
2480 | if (lb == NULL) | |
2481 | break; | |
8728b834 | 2482 | NS_SKB_CB(lb)->buf_type = BUF_LG; |
1da177e4 LT |
2483 | skb_queue_tail(&card->lbpool.queue, lb); |
2484 | skb_reserve(lb, NS_SMBUFSIZE); | |
8728b834 | 2485 | push_rxbufs(card, lb); |
1da177e4 LT |
2486 | } while (card->lbfqc < card->lbnr.min); |
2487 | } | |
2488 | ||
2489 | ||
2490 | ||
2491 | static void ns_hb_destructor(struct sk_buff *hb) | |
2492 | { | |
2493 | ns_dev *card; | |
2494 | ||
2495 | card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data; | |
2496 | ||
2497 | while (card->hbpool.count < card->hbnr.init) | |
2498 | { | |
2499 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); | |
2500 | if (hb == NULL) | |
2501 | break; | |
8728b834 | 2502 | NS_SKB_CB(hb)->buf_type = BUF_NONE; |
1da177e4 LT |
2503 | skb_queue_tail(&card->hbpool.queue, hb); |
2504 | card->hbpool.count++; | |
2505 | } | |
2506 | } | |
2507 | ||
2508 | #endif /* NS_USE_DESTRUCTORS */ | |
2509 | ||
2510 | ||
1da177e4 LT |
2511 | static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb) |
2512 | { | |
8728b834 | 2513 | struct ns_skb_cb *cb = NS_SKB_CB(skb); |
1da177e4 | 2514 | |
8728b834 DM |
2515 | if (unlikely(cb->buf_type == BUF_NONE)) { |
2516 | printk("nicstar%d: What kind of rx buffer is this?\n", card->index); | |
2517 | dev_kfree_skb_any(skb); | |
2518 | } else | |
2519 | push_rxbufs(card, skb); | |
2520 | } | |
1da177e4 LT |
2521 | |
2522 | ||
2523 | static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count) | |
2524 | { | |
8728b834 DM |
2525 | while (count-- > 0) |
2526 | recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base); | |
1da177e4 LT |
2527 | } |
2528 | ||
2529 | ||
1da177e4 LT |
2530 | static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb) |
2531 | { | |
2532 | if (card->iovpool.count < card->iovnr.max) | |
2533 | { | |
2534 | skb_queue_tail(&card->iovpool.queue, iovb); | |
2535 | card->iovpool.count++; | |
2536 | } | |
2537 | else | |
2538 | dev_kfree_skb_any(iovb); | |
2539 | } | |
2540 | ||
2541 | ||
2542 | ||
2543 | static void dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) | |
2544 | { | |
8728b834 | 2545 | skb_unlink(sb, &card->sbpool.queue); |
1da177e4 LT |
2546 | #ifdef NS_USE_DESTRUCTORS |
2547 | if (card->sbfqc < card->sbnr.min) | |
2548 | #else | |
2549 | if (card->sbfqc < card->sbnr.init) | |
2550 | { | |
2551 | struct sk_buff *new_sb; | |
2552 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) | |
2553 | { | |
8728b834 | 2554 | NS_SKB_CB(new_sb)->buf_type = BUF_SM; |
1da177e4 LT |
2555 | skb_queue_tail(&card->sbpool.queue, new_sb); |
2556 | skb_reserve(new_sb, NS_AAL0_HEADER); | |
8728b834 | 2557 | push_rxbufs(card, new_sb); |
1da177e4 LT |
2558 | } |
2559 | } | |
2560 | if (card->sbfqc < card->sbnr.init) | |
2561 | #endif /* NS_USE_DESTRUCTORS */ | |
2562 | { | |
2563 | struct sk_buff *new_sb; | |
2564 | if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) | |
2565 | { | |
8728b834 | 2566 | NS_SKB_CB(new_sb)->buf_type = BUF_SM; |
1da177e4 LT |
2567 | skb_queue_tail(&card->sbpool.queue, new_sb); |
2568 | skb_reserve(new_sb, NS_AAL0_HEADER); | |
8728b834 | 2569 | push_rxbufs(card, new_sb); |
1da177e4 LT |
2570 | } |
2571 | } | |
2572 | } | |
2573 | ||
2574 | ||
2575 | ||
2576 | static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) | |
2577 | { | |
8728b834 | 2578 | skb_unlink(lb, &card->lbpool.queue); |
1da177e4 LT |
2579 | #ifdef NS_USE_DESTRUCTORS |
2580 | if (card->lbfqc < card->lbnr.min) | |
2581 | #else | |
2582 | if (card->lbfqc < card->lbnr.init) | |
2583 | { | |
2584 | struct sk_buff *new_lb; | |
2585 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) | |
2586 | { | |
8728b834 | 2587 | NS_SKB_CB(new_lb)->buf_type = BUF_LG; |
1da177e4 LT |
2588 | skb_queue_tail(&card->lbpool.queue, new_lb); |
2589 | skb_reserve(new_lb, NS_SMBUFSIZE); | |
8728b834 | 2590 | push_rxbufs(card, new_lb); |
1da177e4 LT |
2591 | } |
2592 | } | |
2593 | if (card->lbfqc < card->lbnr.init) | |
2594 | #endif /* NS_USE_DESTRUCTORS */ | |
2595 | { | |
2596 | struct sk_buff *new_lb; | |
2597 | if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) | |
2598 | { | |
8728b834 | 2599 | NS_SKB_CB(new_lb)->buf_type = BUF_LG; |
1da177e4 LT |
2600 | skb_queue_tail(&card->lbpool.queue, new_lb); |
2601 | skb_reserve(new_lb, NS_SMBUFSIZE); | |
8728b834 | 2602 | push_rxbufs(card, new_lb); |
1da177e4 LT |
2603 | } |
2604 | } | |
2605 | } | |
2606 | ||
2607 | ||
2608 | ||
2609 | static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page) | |
2610 | { | |
2611 | u32 stat; | |
2612 | ns_dev *card; | |
2613 | int left; | |
2614 | ||
2615 | left = (int) *pos; | |
2616 | card = (ns_dev *) dev->dev_data; | |
2617 | stat = readl(card->membase + STAT); | |
2618 | if (!left--) | |
2619 | return sprintf(page, "Pool count min init max \n"); | |
2620 | if (!left--) | |
2621 | return sprintf(page, "Small %5d %5d %5d %5d \n", | |
2622 | ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init, | |
2623 | card->sbnr.max); | |
2624 | if (!left--) | |
2625 | return sprintf(page, "Large %5d %5d %5d %5d \n", | |
2626 | ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init, | |
2627 | card->lbnr.max); | |
2628 | if (!left--) | |
2629 | return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count, | |
2630 | card->hbnr.min, card->hbnr.init, card->hbnr.max); | |
2631 | if (!left--) | |
2632 | return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count, | |
2633 | card->iovnr.min, card->iovnr.init, card->iovnr.max); | |
2634 | if (!left--) | |
2635 | { | |
2636 | int retval; | |
2637 | retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt); | |
2638 | card->intcnt = 0; | |
2639 | return retval; | |
2640 | } | |
2641 | #if 0 | |
2642 | /* Dump 25.6 Mbps PHY registers */ | |
2643 | /* Now that there's a 25.6 Mbps PHY driver, this code isn't needed. I left it
2644 | here just in case it's needed for debugging. */
2645 | if (card->max_pcr == ATM_25_PCR && !left--) | |
2646 | { | |
2647 | u32 phy_regs[4]; | |
2648 | u32 i; | |
2649 | ||
2650 | for (i = 0; i < 4; i++) | |
2651 | { | |
2652 | while (CMD_BUSY(card)); | |
2653 | writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD); | |
2654 | while (CMD_BUSY(card)); | |
2655 | phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; | |
2656 | } | |
2657 | ||
2658 | return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", | |
2659 | phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]); | |
2660 | } | |
2661 | #endif /* 0 - Dump 25.6 Mbps PHY registers */ | |
2662 | #if 0 | |
2663 | /* Dump TST */ | |
2664 | if (left-- < NS_TST_NUM_ENTRIES) | |
2665 | { | |
2666 | if (card->tste2vc[left + 1] == NULL) | |
2667 | return sprintf(page, "%5d - VBR/UBR \n", left + 1); | |
2668 | else | |
2669 | return sprintf(page, "%5d - %d %d \n", left + 1, | |
2670 | card->tste2vc[left + 1]->tx_vcc->vpi, | |
2671 | card->tste2vc[left + 1]->tx_vcc->vci); | |
2672 | } | |
2673 | #endif /* 0 */ | |
2674 | return 0; | |
2675 | } | |
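/*
 * ns_proc_read() follows the old one-line-per-call /proc convention: the
 * caller advances *pos between calls, and each call emits the line that
 * makes the local `left' counter hit zero, returning 0 once everything has
 * been printed.  The fragment below is a stripped-down model of that
 * pattern with invented line contents, not the driver function itself.
 */
#if 0
#include <stdio.h>

static int demo_proc_read(long pos, char *page)
{
	long left = pos;

	if (!left--)
		return sprintf(page, "line 0: pool headings\n");
	if (!left--)
		return sprintf(page, "line 1: small buffer levels\n");
	return 0;			/* past the last line */
}

int main(void)
{
	char page[128];
	long pos;

	for (pos = 0; demo_proc_read(pos, page) > 0; pos++)
		fputs(page, stdout);
	return 0;
}
#endif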
2676 | ||
2677 | ||
2678 | ||
2679 | static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) | |
2680 | { | |
2681 | ns_dev *card; | |
2682 | pool_levels pl; | |
69c30147 | 2683 | long btype; |
1da177e4 LT |
2684 | unsigned long flags; |
2685 | ||
2686 | card = dev->dev_data; | |
2687 | switch (cmd) | |
2688 | { | |
2689 | case NS_GETPSTAT: | |
2690 | if (get_user(pl.buftype, &((pool_levels __user *) arg)->buftype)) | |
2691 | return -EFAULT; | |
2692 | switch (pl.buftype) | |
2693 | { | |
2694 | case NS_BUFTYPE_SMALL: | |
2695 | pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT)); | |
2696 | pl.level.min = card->sbnr.min; | |
2697 | pl.level.init = card->sbnr.init; | |
2698 | pl.level.max = card->sbnr.max; | |
2699 | break; | |
2700 | ||
2701 | case NS_BUFTYPE_LARGE: | |
2702 | pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT)); | |
2703 | pl.level.min = card->lbnr.min; | |
2704 | pl.level.init = card->lbnr.init; | |
2705 | pl.level.max = card->lbnr.max; | |
2706 | break; | |
2707 | ||
2708 | case NS_BUFTYPE_HUGE: | |
2709 | pl.count = card->hbpool.count; | |
2710 | pl.level.min = card->hbnr.min; | |
2711 | pl.level.init = card->hbnr.init; | |
2712 | pl.level.max = card->hbnr.max; | |
2713 | break; | |
2714 | ||
2715 | case NS_BUFTYPE_IOVEC: | |
2716 | pl.count = card->iovpool.count; | |
2717 | pl.level.min = card->iovnr.min; | |
2718 | pl.level.init = card->iovnr.init; | |
2719 | pl.level.max = card->iovnr.max; | |
2720 | break; | |
2721 | ||
2722 | default: | |
2723 | return -ENOIOCTLCMD; | |
2724 | ||
2725 | } | |
2726 | if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) | |
2727 | return (sizeof(pl)); | |
2728 | else | |
2729 | return -EFAULT; | |
2730 | ||
2731 | case NS_SETBUFLEV: | |
2732 | if (!capable(CAP_NET_ADMIN)) | |
2733 | return -EPERM; | |
2734 | if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) | |
2735 | return -EFAULT; | |
2736 | if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max) | |
2737 | return -EINVAL; | |
2738 | if (pl.level.min == 0) | |
2739 | return -EINVAL; | |
2740 | switch (pl.buftype) | |
2741 | { | |
2742 | case NS_BUFTYPE_SMALL: | |
2743 | if (pl.level.max > TOP_SB) | |
2744 | return -EINVAL; | |
2745 | card->sbnr.min = pl.level.min; | |
2746 | card->sbnr.init = pl.level.init; | |
2747 | card->sbnr.max = pl.level.max; | |
2748 | break; | |
2749 | ||
2750 | case NS_BUFTYPE_LARGE: | |
2751 | if (pl.level.max > TOP_LB) | |
2752 | return -EINVAL; | |
2753 | card->lbnr.min = pl.level.min; | |
2754 | card->lbnr.init = pl.level.init; | |
2755 | card->lbnr.max = pl.level.max; | |
2756 | break; | |
2757 | ||
2758 | case NS_BUFTYPE_HUGE: | |
2759 | if (pl.level.max > TOP_HB) | |
2760 | return -EINVAL; | |
2761 | card->hbnr.min = pl.level.min; | |
2762 | card->hbnr.init = pl.level.init; | |
2763 | card->hbnr.max = pl.level.max; | |
2764 | break; | |
2765 | ||
2766 | case NS_BUFTYPE_IOVEC: | |
2767 | if (pl.level.max > TOP_IOVB) | |
2768 | return -EINVAL; | |
2769 | card->iovnr.min = pl.level.min; | |
2770 | card->iovnr.init = pl.level.init; | |
2771 | card->iovnr.max = pl.level.max; | |
2772 | break; | |
2773 | ||
2774 | default: | |
2775 | return -EINVAL; | |
2776 | ||
2777 | } | |
2778 | return 0; | |
2779 | ||
2780 | case NS_ADJBUFLEV: | |
2781 | if (!capable(CAP_NET_ADMIN)) | |
2782 | return -EPERM; | |
69c30147 | 2783 | btype = (long) arg; /* a long is at least as wide as a pointer */
1da177e4 LT |
2784 | switch (btype) |
2785 | { | |
2786 | case NS_BUFTYPE_SMALL: | |
2787 | while (card->sbfqc < card->sbnr.init) | |
2788 | { | |
2789 | struct sk_buff *sb; | |
2790 | ||
2791 | sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); | |
2792 | if (sb == NULL) | |
2793 | return -ENOMEM; | |
8728b834 | 2794 | NS_SKB_CB(sb)->buf_type = BUF_SM; |
1da177e4 LT |
2795 | skb_queue_tail(&card->sbpool.queue, sb); |
2796 | skb_reserve(sb, NS_AAL0_HEADER); | |
8728b834 | 2797 | push_rxbufs(card, sb); |
1da177e4 LT |
2798 | } |
2799 | break; | |
2800 | ||
2801 | case NS_BUFTYPE_LARGE: | |
2802 | while (card->lbfqc < card->lbnr.init) | |
2803 | { | |
2804 | struct sk_buff *lb; | |
2805 | ||
2806 | lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); | |
2807 | if (lb == NULL) | |
2808 | return -ENOMEM; | |
8728b834 | 2809 | NS_SKB_CB(lb)->buf_type = BUF_LG; |
1da177e4 LT |
2810 | skb_queue_tail(&card->lbpool.queue, lb); |
2811 | skb_reserve(lb, NS_SMBUFSIZE); | |
8728b834 | 2812 | push_rxbufs(card, lb); |
1da177e4 LT |
2813 | } |
2814 | break; | |
2815 | ||
2816 | case NS_BUFTYPE_HUGE: | |
2817 | while (card->hbpool.count > card->hbnr.init) | |
2818 | { | |
2819 | struct sk_buff *hb; | |
2820 | ||
36ef4080 | 2821 | spin_lock_irqsave(&card->int_lock, flags); |
1da177e4 LT |
2822 | hb = skb_dequeue(&card->hbpool.queue); |
2823 | card->hbpool.count--; | |
2824 | spin_unlock_irqrestore(&card->int_lock, flags); | |
2825 | if (hb == NULL) | |
2826 | printk("nicstar%d: huge buffer count inconsistent.\n", | |
2827 | card->index); | |
2828 | else | |
2829 | dev_kfree_skb_any(hb); | |
2830 | ||
2831 | } | |
2832 | while (card->hbpool.count < card->hbnr.init) | |
2833 | { | |
2834 | struct sk_buff *hb; | |
2835 | ||
2836 | hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); | |
2837 | if (hb == NULL) | |
2838 | return -ENOMEM; | |
8728b834 | 2839 | NS_SKB_CB(hb)->buf_type = BUF_NONE; |
36ef4080 | 2840 | spin_lock_irqsave(&card->int_lock, flags); |
1da177e4 LT |
2841 | skb_queue_tail(&card->hbpool.queue, hb); |
2842 | card->hbpool.count++; | |
2843 | spin_unlock_irqrestore(&card->int_lock, flags); | |
2844 | } | |
2845 | break; | |
2846 | ||
2847 | case NS_BUFTYPE_IOVEC: | |
2848 | while (card->iovpool.count > card->iovnr.init) | |
2849 | { | |
2850 | struct sk_buff *iovb; | |
2851 | ||
36ef4080 | 2852 | spin_lock_irqsave(&card->int_lock, flags); |
1da177e4 LT |
2853 | iovb = skb_dequeue(&card->iovpool.queue); |
2854 | card->iovpool.count--; | |
2855 | spin_unlock_irqrestore(&card->int_lock, flags); | |
2856 | if (iovb == NULL) | |
2857 | printk("nicstar%d: iovec buffer count inconsistent.\n", | |
2858 | card->index); | |
2859 | else | |
2860 | dev_kfree_skb_any(iovb); | |
2861 | ||
2862 | } | |
2863 | while (card->iovpool.count < card->iovnr.init) | |
2864 | { | |
2865 | struct sk_buff *iovb; | |
2866 | ||
2867 | iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); | |
2868 | if (iovb == NULL) | |
2869 | return -ENOMEM; | |
8728b834 | 2870 | NS_SKB_CB(iovb)->buf_type = BUF_NONE; |
36ef4080 | 2871 | spin_lock_irqsave(&card->int_lock, flags); |
1da177e4 LT |
2872 | skb_queue_tail(&card->iovpool.queue, iovb); |
2873 | card->iovpool.count++; | |
2874 | spin_unlock_irqrestore(&card->int_lock, flags); | |
2875 | } | |
2876 | break; | |
2877 | ||
2878 | default: | |
2879 | return -EINVAL; | |
2880 | ||
2881 | } | |
2882 | return 0; | |
2883 | ||
2884 | default: | |
2885 | if (dev->phy && dev->phy->ioctl) { | |
2886 | return dev->phy->ioctl(dev, cmd, arg); | |
2887 | } | |
2888 | else { | |
2889 | printk("nicstar%d: %s == NULL \n", card->index, | |
2890 | dev->phy ? "dev->phy->ioctl" : "dev->phy"); | |
2891 | return -ENOIOCTLCMD; | |
2892 | } | |
2893 | } | |
2894 | } | |
2895 | ||
2896 | ||
1da177e4 LT |
2897 | static void which_list(ns_dev *card, struct sk_buff *skb) |
2898 | { | |
8728b834 | 2899 | printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type); |
1da177e4 LT |
2900 | } |
2901 | ||
2902 | ||
1da177e4 LT |
2903 | static void ns_poll(unsigned long arg) |
2904 | { | |
2905 | int i; | |
2906 | ns_dev *card; | |
2907 | unsigned long flags; | |
2908 | u32 stat_r, stat_w; | |
2909 | ||
2910 | PRINTK("nicstar: Entering ns_poll().\n"); | |
2911 | for (i = 0; i < num_cards; i++) | |
2912 | { | |
2913 | card = cards[i]; | |
2914 | if (spin_is_locked(&card->int_lock)) { | |
2915 | /* Probably it isn't worth spinning */ | |
2916 | continue; | |
2917 | } | |
36ef4080 | 2918 | spin_lock_irqsave(&card->int_lock, flags); |
1da177e4 LT |
2919 | |
2920 | stat_w = 0; | |
2921 | stat_r = readl(card->membase + STAT); | |
2922 | if (stat_r & NS_STAT_TSIF) | |
2923 | stat_w |= NS_STAT_TSIF; | |
2924 | if (stat_r & NS_STAT_EOPDU) | |
2925 | stat_w |= NS_STAT_EOPDU; | |
2926 | ||
2927 | process_tsq(card); | |
2928 | process_rsq(card); | |
2929 | ||
2930 | writel(stat_w, card->membase + STAT); | |
2931 | spin_unlock_irqrestore(&card->int_lock, flags); | |
2932 | } | |
2933 | mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); | |
2934 | PRINTK("nicstar: Leaving ns_poll().\n"); | |
2935 | } | |
2936 | ||
2937 | ||
2938 | ||
2939 | static int ns_parse_mac(char *mac, unsigned char *esi) | |
2940 | { | |
2941 | int i, j; | |
2942 | short byte1, byte0; | |
2943 | ||
2944 | if (mac == NULL || esi == NULL) | |
2945 | return -1; | |
2946 | j = 0; | |
2947 | for (i = 0; i < 6; i++) | |
2948 | { | |
2949 | if ((byte1 = ns_h2i(mac[j++])) < 0) | |
2950 | return -1; | |
2951 | if ((byte0 = ns_h2i(mac[j++])) < 0) | |
2952 | return -1; | |
2953 | esi[i] = (unsigned char) (byte1 * 16 + byte0); | |
2954 | if (i < 5) | |
2955 | { | |
2956 | if (mac[j++] != ':') | |
2957 | return -1; | |
2958 | } | |
2959 | } | |
2960 | return 0; | |
2961 | } | |
2962 | ||
2963 | ||
2964 | ||
2965 | static short ns_h2i(char c) | |
2966 | { | |
2967 | if (c >= '0' && c <= '9') | |
2968 | return (short) (c - '0'); | |
2969 | if (c >= 'A' && c <= 'F') | |
2970 | return (short) (c - 'A' + 10); | |
2971 | if (c >= 'a' && c <= 'f') | |
2972 | return (short) (c - 'a' + 10); | |
2973 | return -1; | |
2974 | } | |
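/*
 * ns_parse_mac() expects the usual colon-separated "xx:xx:xx:xx:xx:xx"
 * form and fills six raw ESI bytes.  The sketch below re-creates the same
 * parsing with a made-up address so the expected output is easy to see;
 * it does not call the driver helpers themselves.
 */
#if 0
#include <stdio.h>

static int demo_h2i(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return -1;
}

int main(void)
{
	const char *mac = "00:20:48:10:2a:3c";	/* invented ESI */
	unsigned char esi[6];
	int i;

	for (i = 0; i < 6; i++)
		esi[i] = demo_h2i(mac[i * 3]) * 16 + demo_h2i(mac[i * 3 + 1]);

	for (i = 0; i < 6; i++)
		printf("%02x%c", esi[i], i < 5 ? ':' : '\n');
	return 0;
}
#endif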
2975 | ||
2976 | ||
2977 | ||
2978 | static void ns_phy_put(struct atm_dev *dev, unsigned char value, | |
2979 | unsigned long addr) | |
2980 | { | |
2981 | ns_dev *card; | |
2982 | unsigned long flags; | |
2983 | ||
2984 | card = dev->dev_data; | |
36ef4080 | 2985 | spin_lock_irqsave(&card->res_lock, flags); |
1da177e4 LT |
2986 | while(CMD_BUSY(card)); |
2987 | writel((unsigned long) value, card->membase + DR0); | |
2988 | writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), | |
2989 | card->membase + CMD); | |
2990 | spin_unlock_irqrestore(&card->res_lock, flags); | |
2991 | } | |
2992 | ||
2993 | ||
2994 | ||
2995 | static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) | |
2996 | { | |
2997 | ns_dev *card; | |
2998 | unsigned long flags; | |
2999 | unsigned long data; | |
3000 | ||
3001 | card = dev->dev_data; | |
36ef4080 | 3002 | spin_lock_irqsave(&card->res_lock, flags); |
1da177e4 LT |
3003 | while(CMD_BUSY(card)); |
3004 | writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), | |
3005 | card->membase + CMD); | |
3006 | while(CMD_BUSY(card)); | |
3007 | data = readl(card->membase + DR0) & 0x000000FF; | |
3008 | spin_unlock_irqrestore(&card->res_lock, flags); | |
3009 | return (unsigned char) data; | |
3010 | } | |
3011 | ||
3012 | ||
3013 | ||
3014 | module_init(nicstar_init); | |
3015 | module_exit(nicstar_cleanup); |