1 // SPDX-License-Identifier: GPL-2.0-or-later
3 A FORE Systems 200E-series driver for ATM on Linux.
8 This driver simultaneously supports PCA-200E and SBA-200E adapters
9 on i386, alpha (untested), powerpc, sparc and sparc64 architectures.
14 #include <linux/kernel.h>
15 #include <linux/slab.h>
16 #include <linux/init.h>
17 #include <linux/capability.h>
18 #include <linux/interrupt.h>
19 #include <linux/bitops.h>
20 #include <linux/pci.h>
21 #include <linux/module.h>
22 #include <linux/atmdev.h>
23 #include <linux/sonet.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/delay.h>
26 #include <linux/firmware.h>
27 #include <linux/pgtable.h>
29 #include <asm/string.h>
33 #include <asm/byteorder.h>
34 #include <linux/uaccess.h>
35 #include <linux/atomic.h>
39 #include <linux/of_device.h>
40 #include <asm/idprom.h>
41 #include <asm/openprom.h>
42 #include <asm/oplib.h>
45 #if defined(CONFIG_ATM_FORE200E_USE_TASKLET) /* defer interrupt work to a tasklet */
46 #define FORE200E_USE_TASKLET
49 #if 0 /* enable the debugging code for the buffer supply queues */
50 #define FORE200E_BSQ_DEBUG
53 #if 1 /* ensure correct handling of 52-byte AAL0 SDUs expected by atmdump-like apps */
54 #define FORE200E_52BYTE_AAL0_SDU
60 #define FORE200E_VERSION "0.3e"
62 #define FORE200E "fore200e: "
64 #if 0 /* override .config */
65 #define CONFIG_ATM_FORE200E_DEBUG 1
67 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
68 #define DPRINTK(level, format, args...) do { if (CONFIG_ATM_FORE200E_DEBUG >= (level)) \
69 printk(FORE200E format, ##args); } while (0)
71 #define DPRINTK(level, format, args...) do {} while (0)
75 #define FORE200E_ALIGN(addr, alignment) \
76 ((((unsigned long)(addr) + (alignment - 1)) & ~(alignment - 1)) - (unsigned long)(addr))
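/* Illustrative note (not from the original source): FORE200E_ALIGN() returns the number
   of padding bytes needed to bring 'addr' up to the next 'alignment' boundary (0 if the
   address is already aligned), not an aligned address itself. A worked example with a
   hypothetical address: */
#if 0 /* illustrative sketch only -- not part of the driver */
/* (0x1004 + 31) & ~31 == 0x1020, so 28 padding bytes are needed */
unsigned long pad = FORE200E_ALIGN((void *)0x1004, 32); /* pad == 28 */
#endif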
78 #define FORE200E_DMA_INDEX(dma_addr, type, index) ((dma_addr) + (index) * sizeof(type))
80 #define FORE200E_INDEX(virt_addr, type, index) (&((type *)(virt_addr))[ index ])
82 #define FORE200E_NEXT_ENTRY(index, modulo) (index = ((index) + 1) % (modulo))
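/* Illustrative sketch (not from the original source): how the three queue helpers above
   are typically combined when walking a host ring. A 'fore200e' device pointer and its
   rx queue are assumed to be in scope; the snippet only mirrors the patterns used
   further below in the rx/tx/cmd queue code. */
#if 0 /* illustrative sketch only -- not part of the driver */
struct host_rxq *rxq = &fore200e->host_rxq;
enum status *status = FORE200E_INDEX(rxq->status.align_addr, enum status, rxq->head); /* i-th status word (virtual) */
u32 status_dma = FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, rxq->head);    /* same entry, DMA address */
FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX); /* advance the ring index, wrapping at QUEUE_SIZE_RX */
#endif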
85 #define ASSERT(expr) if (!(expr)) { \
86 printk(FORE200E "assertion failed! %s[%d]: %s\n", \
87 __func__, __LINE__, #expr); \
88 panic(FORE200E "%s", __func__); \
91 #define ASSERT(expr) do {} while (0)
95 static const struct atmdev_ops fore200e_ops;
97 static LIST_HEAD(fore200e_boards);
100 MODULE_AUTHOR("Christophe Lizzi - credits to Uwe Dannowski and Heikki Vatiainen");
101 MODULE_DESCRIPTION("FORE Systems 200E-series ATM driver - version " FORE200E_VERSION);
103 static const int fore200e_rx_buf_nbr[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
104 { BUFFER_S1_NBR, BUFFER_L1_NBR },
105 { BUFFER_S2_NBR, BUFFER_L2_NBR }
108 static const int fore200e_rx_buf_size[ BUFFER_SCHEME_NBR ][ BUFFER_MAGN_NBR ] = {
109 { BUFFER_S1_SIZE, BUFFER_L1_SIZE },
110 { BUFFER_S2_SIZE, BUFFER_L2_SIZE }
114 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG > 0)
115 static const char* fore200e_traffic_class[] = { "NONE", "UBR", "CBR", "VBR", "ABR", "ANY" };
119 #if 0 /* currently unused */
121 fore200e_fore2atm_aal(enum fore200e_aal aal)
124 case FORE200E_AAL0: return ATM_AAL0;
125 case FORE200E_AAL34: return ATM_AAL34;
126 case FORE200E_AAL5: return ATM_AAL5;
134 static enum fore200e_aal
135 fore200e_atm2fore_aal(int aal)
138 case ATM_AAL0: return FORE200E_AAL0;
139 case ATM_AAL34: return FORE200E_AAL34;
142 case ATM_AAL5: return FORE200E_AAL5;
150 fore200e_irq_itoa(int irq)
153 sprintf(str, "%d", irq);
158 /* allocate and align a chunk of memory intended to hold the data being exchanged
159 between the driver and the adapter (using streaming DVMA) */
162 fore200e_chunk_alloc(struct fore200e* fore200e, struct chunk* chunk, int size, int alignment, int direction)
164 unsigned long offset = 0;
166 if (alignment <= sizeof(int))
169 chunk->alloc_size = size + alignment;
170 chunk->direction = direction;
172 chunk->alloc_addr = kzalloc(chunk->alloc_size, GFP_KERNEL);
173 if (chunk->alloc_addr == NULL)
177 offset = FORE200E_ALIGN(chunk->alloc_addr, alignment);
179 chunk->align_addr = chunk->alloc_addr + offset;
181 chunk->dma_addr = dma_map_single(fore200e->dev, chunk->align_addr,
183 if (dma_mapping_error(fore200e->dev, chunk->dma_addr)) {
184 kfree(chunk->alloc_addr);
191 /* free a chunk of memory */
194 fore200e_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
196 dma_unmap_single(fore200e->dev, chunk->dma_addr, chunk->dma_size,
198 kfree(chunk->alloc_addr);
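/* Illustrative sketch (not from the original source) of the chunk layout built by
   fore200e_chunk_alloc() and torn down by fore200e_chunk_free() above, assuming the
   usual case where alignment > sizeof(int):

     alloc_addr                        align_addr = alloc_addr + offset
        |<----------- offset ----------->|<------------- size ------------->|
        |<-------------- alloc_size = size + alignment -------------------------->|

   chunk->dma_addr maps align_addr for streaming DMA in the requested direction;
   every successful fore200e_chunk_alloc() must be balanced by fore200e_chunk_free(),
   which unmaps dma_addr and then kfrees alloc_addr. */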
202 * Allocate a DMA consistent chunk of memory intended to act as a communication
203 * mechanism (to hold descriptors, status, queues, etc.) shared by the driver
207 fore200e_dma_chunk_alloc(struct fore200e *fore200e, struct chunk *chunk,
208 int size, int nbr, int alignment)
210 /* returned chunks are page-aligned */
211 chunk->alloc_size = size * nbr;
212 chunk->alloc_addr = dma_alloc_coherent(fore200e->dev, chunk->alloc_size,
213 &chunk->dma_addr, GFP_KERNEL);
214 if (!chunk->alloc_addr)
216 chunk->align_addr = chunk->alloc_addr;
221 * Free a DMA consistent chunk of memory.
224 fore200e_dma_chunk_free(struct fore200e* fore200e, struct chunk* chunk)
226 dma_free_coherent(fore200e->dev, chunk->alloc_size, chunk->alloc_addr,
231 fore200e_spin(int msecs)
233 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
234 while (time_before(jiffies, timeout));
239 fore200e_poll(struct fore200e* fore200e, volatile u32* addr, u32 val, int msecs)
241 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
246 if ((ok = (*addr == val)) || (*addr & STATUS_ERROR))
249 } while (time_before(jiffies, timeout));
253 printk(FORE200E "cmd polling failed, got status 0x%08x, expected 0x%08x\n",
263 fore200e_io_poll(struct fore200e* fore200e, volatile u32 __iomem *addr, u32 val, int msecs)
265 unsigned long timeout = jiffies + msecs_to_jiffies(msecs);
269 if ((ok = (fore200e->bus->read(addr) == val)))
272 } while (time_before(jiffies, timeout));
276 printk(FORE200E "I/O polling failed, got status 0x%08x, expected 0x%08x\n",
277 fore200e->bus->read(addr), val);
286 fore200e_free_rx_buf(struct fore200e* fore200e)
288 int scheme, magn, nbr;
289 struct buffer* buffer;
291 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
292 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
294 if ((buffer = fore200e->host_bsq[ scheme ][ magn ].buffer) != NULL) {
296 for (nbr = 0; nbr < fore200e_rx_buf_nbr[ scheme ][ magn ]; nbr++) {
298 struct chunk* data = &buffer[ nbr ].data;
300 if (data->alloc_addr != NULL)
301 fore200e_chunk_free(fore200e, data);
310 fore200e_uninit_bs_queue(struct fore200e* fore200e)
314 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
315 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
317 struct chunk* status = &fore200e->host_bsq[ scheme ][ magn ].status;
318 struct chunk* rbd_block = &fore200e->host_bsq[ scheme ][ magn ].rbd_block;
320 if (status->alloc_addr)
321 fore200e_dma_chunk_free(fore200e, status);
323 if (rbd_block->alloc_addr)
324 fore200e_dma_chunk_free(fore200e, rbd_block);
331 fore200e_reset(struct fore200e* fore200e, int diag)
335 fore200e->cp_monitor = fore200e->virt_base + FORE200E_CP_MONITOR_OFFSET;
337 fore200e->bus->write(BSTAT_COLD_START, &fore200e->cp_monitor->bstat);
339 fore200e->bus->reset(fore200e);
342 ok = fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_SELFTEST_OK, 1000);
345 printk(FORE200E "device %s self-test failed\n", fore200e->name);
349 printk(FORE200E "device %s self-test passed\n", fore200e->name);
351 fore200e->state = FORE200E_STATE_RESET;
359 fore200e_shutdown(struct fore200e* fore200e)
361 printk(FORE200E "removing device %s at 0x%lx, IRQ %s\n",
362 fore200e->name, fore200e->phys_base,
363 fore200e_irq_itoa(fore200e->irq));
365 if (fore200e->state > FORE200E_STATE_RESET) {
366 /* first, reset the board to prevent further interrupts or data transfers */
367 fore200e_reset(fore200e, 0);
370 /* then, release all allocated resources */
371 switch(fore200e->state) {
373 case FORE200E_STATE_COMPLETE:
374 kfree(fore200e->stats);
377 case FORE200E_STATE_IRQ:
378 free_irq(fore200e->irq, fore200e->atm_dev);
381 case FORE200E_STATE_ALLOC_BUF:
382 fore200e_free_rx_buf(fore200e);
385 case FORE200E_STATE_INIT_BSQ:
386 fore200e_uninit_bs_queue(fore200e);
389 case FORE200E_STATE_INIT_RXQ:
390 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.status);
391 fore200e_dma_chunk_free(fore200e, &fore200e->host_rxq.rpd);
394 case FORE200E_STATE_INIT_TXQ:
395 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.status);
396 fore200e_dma_chunk_free(fore200e, &fore200e->host_txq.tpd);
399 case FORE200E_STATE_INIT_CMDQ:
400 fore200e_dma_chunk_free(fore200e, &fore200e->host_cmdq.status);
403 case FORE200E_STATE_INITIALIZE:
404 /* nothing to do for that state */
406 case FORE200E_STATE_START_FW:
407 /* nothing to do for that state */
409 case FORE200E_STATE_RESET:
410 /* nothing to do for that state */
412 case FORE200E_STATE_MAP:
413 fore200e->bus->unmap(fore200e);
416 case FORE200E_STATE_CONFIGURE:
417 /* nothing to do for that state */
419 case FORE200E_STATE_REGISTER:
420 /* XXX shouldn't we *start* by deregistering the device? */
421 atm_dev_deregister(fore200e->atm_dev);
423 case FORE200E_STATE_BLANK:
424 /* nothing to do for that state */
432 static u32 fore200e_pca_read(volatile u32 __iomem *addr)
434 /* on big-endian hosts, the board is configured to convert
435 the endianness of slave RAM accesses */
436 return le32_to_cpu(readl(addr));
440 static void fore200e_pca_write(u32 val, volatile u32 __iomem *addr)
442 /* on big-endian hosts, the board is configured to convert
443 the endianness of slave RAM accesses */
444 writel(cpu_to_le32(val), addr);
448 fore200e_pca_irq_check(struct fore200e* fore200e)
450 /* this is a 1-bit register */
451 int irq_posted = readl(fore200e->regs.pca.psr);
453 #if defined(CONFIG_ATM_FORE200E_DEBUG) && (CONFIG_ATM_FORE200E_DEBUG == 2)
454 if (irq_posted && (readl(fore200e->regs.pca.hcr) & PCA200E_HCR_OUTFULL)) {
455 DPRINTK(2,"FIFO OUT full, device %d\n", fore200e->atm_dev->number);
464 fore200e_pca_irq_ack(struct fore200e* fore200e)
466 writel(PCA200E_HCR_CLRINTR, fore200e->regs.pca.hcr);
471 fore200e_pca_reset(struct fore200e* fore200e)
473 writel(PCA200E_HCR_RESET, fore200e->regs.pca.hcr);
475 writel(0, fore200e->regs.pca.hcr);
479 static int fore200e_pca_map(struct fore200e* fore200e)
481 DPRINTK(2, "device %s being mapped in memory\n", fore200e->name);
483 fore200e->virt_base = ioremap(fore200e->phys_base, PCA200E_IOSPACE_LENGTH);
485 if (fore200e->virt_base == NULL) {
486 printk(FORE200E "can't map device %s\n", fore200e->name);
490 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
492 /* gain access to the PCA specific registers */
493 fore200e->regs.pca.hcr = fore200e->virt_base + PCA200E_HCR_OFFSET;
494 fore200e->regs.pca.imr = fore200e->virt_base + PCA200E_IMR_OFFSET;
495 fore200e->regs.pca.psr = fore200e->virt_base + PCA200E_PSR_OFFSET;
497 fore200e->state = FORE200E_STATE_MAP;
503 fore200e_pca_unmap(struct fore200e* fore200e)
505 DPRINTK(2, "device %s being unmapped from memory\n", fore200e->name);
507 if (fore200e->virt_base != NULL)
508 iounmap(fore200e->virt_base);
512 static int fore200e_pca_configure(struct fore200e *fore200e)
514 struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
515 u8 master_ctrl, latency;
517 DPRINTK(2, "device %s being configured\n", fore200e->name);
519 if ((pci_dev->irq == 0) || (pci_dev->irq == 0xFF)) {
520 printk(FORE200E "incorrect IRQ setting - misconfigured PCI-PCI bridge?\n");
524 pci_read_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, &master_ctrl);
526 master_ctrl = master_ctrl
527 #if defined(__BIG_ENDIAN)
528 /* request the PCA board to convert the endianness of slave RAM accesses */
529 | PCA200E_CTRL_CONVERT_ENDIAN
532 | PCA200E_CTRL_DIS_CACHE_RD
533 | PCA200E_CTRL_DIS_WRT_INVAL
534 | PCA200E_CTRL_ENA_CONT_REQ_MODE
535 | PCA200E_CTRL_2_CACHE_WRT_INVAL
537 | PCA200E_CTRL_LARGE_PCI_BURSTS;
539 pci_write_config_byte(pci_dev, PCA200E_PCI_MASTER_CTRL, master_ctrl);
541 /* raise the latency timer from 32 (the default) to 192, as this seems to prevent NIC
542 lockups (under heavy rx load) due to a continuous 'FIFO OUT full' condition.
543 this may impact the performance of other PCI devices on the same bus, though */
545 pci_write_config_byte(pci_dev, PCI_LATENCY_TIMER, latency);
547 fore200e->state = FORE200E_STATE_CONFIGURE;
553 fore200e_pca_prom_read(struct fore200e* fore200e, struct prom_data* prom)
555 struct host_cmdq* cmdq = &fore200e->host_cmdq;
556 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
557 struct prom_opcode opcode;
561 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
563 opcode.opcode = OPCODE_GET_PROM;
566 prom_dma = dma_map_single(fore200e->dev, prom, sizeof(struct prom_data),
568 if (dma_mapping_error(fore200e->dev, prom_dma))
571 fore200e->bus->write(prom_dma, &entry->cp_entry->cmd.prom_block.prom_haddr);
573 *entry->status = STATUS_PENDING;
575 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.prom_block.opcode);
577 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
579 *entry->status = STATUS_FREE;
581 dma_unmap_single(fore200e->dev, prom_dma, sizeof(struct prom_data), DMA_FROM_DEVICE);
584 printk(FORE200E "unable to get PROM data from device %s\n", fore200e->name);
588 #if defined(__BIG_ENDIAN)
590 #define swap_here(addr) (*((u32*)(addr)) = swab32( *((u32*)(addr)) ))
592 /* MAC address is stored as little-endian */
593 swap_here(&prom->mac_addr[0]);
594 swap_here(&prom->mac_addr[4]);
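/* Note (editorial, grounded in the code above and in fore200e_get_esi() below): the PROM
   stores these fields little-endian, so on a big-endian host each 32-bit word holding the
   MAC address is byte-swapped in place before the six ESI bytes are copied out of
   prom->mac_addr[2..7]. */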
602 fore200e_pca_proc_read(struct fore200e* fore200e, char *page)
604 struct pci_dev *pci_dev = to_pci_dev(fore200e->dev);
606 return sprintf(page, " PCI bus/slot/function:\t%d/%d/%d\n",
607 pci_dev->bus->number, PCI_SLOT(pci_dev->devfn), PCI_FUNC(pci_dev->devfn));
610 static const struct fore200e_bus fore200e_pci_ops = {
611 .model_name = "PCA-200E",
612 .proc_name = "pca200e",
613 .descr_alignment = 32,
614 .buffer_alignment = 4,
615 .status_alignment = 32,
616 .read = fore200e_pca_read,
617 .write = fore200e_pca_write,
618 .configure = fore200e_pca_configure,
619 .map = fore200e_pca_map,
620 .reset = fore200e_pca_reset,
621 .prom_read = fore200e_pca_prom_read,
622 .unmap = fore200e_pca_unmap,
623 .irq_check = fore200e_pca_irq_check,
624 .irq_ack = fore200e_pca_irq_ack,
625 .proc_read = fore200e_pca_proc_read,
627 #endif /* CONFIG_PCI */
631 static u32 fore200e_sba_read(volatile u32 __iomem *addr)
633 return sbus_readl(addr);
636 static void fore200e_sba_write(u32 val, volatile u32 __iomem *addr)
638 sbus_writel(val, addr);
641 static void fore200e_sba_irq_enable(struct fore200e *fore200e)
643 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
644 fore200e->bus->write(hcr | SBA200E_HCR_INTR_ENA, fore200e->regs.sba.hcr);
647 static int fore200e_sba_irq_check(struct fore200e *fore200e)
649 return fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_INTR_REQ;
652 static void fore200e_sba_irq_ack(struct fore200e *fore200e)
654 u32 hcr = fore200e->bus->read(fore200e->regs.sba.hcr) & SBA200E_HCR_STICKY;
655 fore200e->bus->write(hcr | SBA200E_HCR_INTR_CLR, fore200e->regs.sba.hcr);
658 static void fore200e_sba_reset(struct fore200e *fore200e)
660 fore200e->bus->write(SBA200E_HCR_RESET, fore200e->regs.sba.hcr);
662 fore200e->bus->write(0, fore200e->regs.sba.hcr);
665 static int __init fore200e_sba_map(struct fore200e *fore200e)
667 struct platform_device *op = to_platform_device(fore200e->dev);
670 /* gain access to the SBA specific registers */
671 fore200e->regs.sba.hcr = of_ioremap(&op->resource[0], 0, SBA200E_HCR_LENGTH, "SBA HCR");
672 fore200e->regs.sba.bsr = of_ioremap(&op->resource[1], 0, SBA200E_BSR_LENGTH, "SBA BSR");
673 fore200e->regs.sba.isr = of_ioremap(&op->resource[2], 0, SBA200E_ISR_LENGTH, "SBA ISR");
674 fore200e->virt_base = of_ioremap(&op->resource[3], 0, SBA200E_RAM_LENGTH, "SBA RAM");
676 if (!fore200e->virt_base) {
677 printk(FORE200E "unable to map RAM of device %s\n", fore200e->name);
681 DPRINTK(1, "device %s mapped to 0x%p\n", fore200e->name, fore200e->virt_base);
683 fore200e->bus->write(0x02, fore200e->regs.sba.isr); /* XXX hardwired interrupt level */
685 /* get the supported DVMA burst sizes */
686 bursts = of_getintprop_default(op->dev.of_node->parent, "burst-sizes", 0x00);
688 if (sbus_can_dma_64bit())
689 sbus_set_sbus64(&op->dev, bursts);
691 fore200e->state = FORE200E_STATE_MAP;
695 static void fore200e_sba_unmap(struct fore200e *fore200e)
697 struct platform_device *op = to_platform_device(fore200e->dev);
699 of_iounmap(&op->resource[0], fore200e->regs.sba.hcr, SBA200E_HCR_LENGTH);
700 of_iounmap(&op->resource[1], fore200e->regs.sba.bsr, SBA200E_BSR_LENGTH);
701 of_iounmap(&op->resource[2], fore200e->regs.sba.isr, SBA200E_ISR_LENGTH);
702 of_iounmap(&op->resource[3], fore200e->virt_base, SBA200E_RAM_LENGTH);
705 static int __init fore200e_sba_configure(struct fore200e *fore200e)
707 fore200e->state = FORE200E_STATE_CONFIGURE;
711 static int __init fore200e_sba_prom_read(struct fore200e *fore200e, struct prom_data *prom)
713 struct platform_device *op = to_platform_device(fore200e->dev);
717 prop = of_get_property(op->dev.of_node, "madaddrlo2", &len);
720 memcpy(&prom->mac_addr[4], prop, 4);
722 prop = of_get_property(op->dev.of_node, "madaddrhi4", &len);
725 memcpy(&prom->mac_addr[2], prop, 4);
727 prom->serial_number = of_getintprop_default(op->dev.of_node,
729 prom->hw_revision = of_getintprop_default(op->dev.of_node,
735 static int fore200e_sba_proc_read(struct fore200e *fore200e, char *page)
737 struct platform_device *op = to_platform_device(fore200e->dev);
738 const struct linux_prom_registers *regs;
740 regs = of_get_property(op->dev.of_node, "reg", NULL);
742 return sprintf(page, " SBUS slot/device:\t\t%d/'%pOFn'\n",
743 (regs ? regs->which_io : 0), op->dev.of_node);
746 static const struct fore200e_bus fore200e_sbus_ops = {
747 .model_name = "SBA-200E",
748 .proc_name = "sba200e",
749 .descr_alignment = 32,
750 .buffer_alignment = 64,
751 .status_alignment = 32,
752 .read = fore200e_sba_read,
753 .write = fore200e_sba_write,
754 .configure = fore200e_sba_configure,
755 .map = fore200e_sba_map,
756 .reset = fore200e_sba_reset,
757 .prom_read = fore200e_sba_prom_read,
758 .unmap = fore200e_sba_unmap,
759 .irq_enable = fore200e_sba_irq_enable,
760 .irq_check = fore200e_sba_irq_check,
761 .irq_ack = fore200e_sba_irq_ack,
762 .proc_read = fore200e_sba_proc_read,
764 #endif /* CONFIG_SBUS */
767 fore200e_tx_irq(struct fore200e* fore200e)
769 struct host_txq* txq = &fore200e->host_txq;
770 struct host_txq_entry* entry;
772 struct fore200e_vc_map* vc_map;
774 if (fore200e->host_txq.txing == 0)
779 entry = &txq->host_entry[ txq->tail ];
781 if ((*entry->status & STATUS_COMPLETE) == 0) {
785 DPRINTK(3, "TX COMPLETED: entry = %p [tail = %d], vc_map = %p, skb = %p\n",
786 entry, txq->tail, entry->vc_map, entry->skb);
788 /* free copy of misaligned data */
791 /* remove DMA mapping */
792 dma_unmap_single(fore200e->dev, entry->tpd->tsd[ 0 ].buffer, entry->tpd->tsd[ 0 ].length,
795 vc_map = entry->vc_map;
797 /* has the vcc been closed since the entry was submitted for tx? */
798 if ((vc_map->vcc == NULL) ||
799 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
801 DPRINTK(1, "no ready vcc found for PDU sent on device %d\n",
802 fore200e->atm_dev->number);
804 dev_kfree_skb_any(entry->skb);
809 /* vcc closed then immediately re-opened? */
810 if (vc_map->incarn != entry->incarn) {
812 /* when a vcc is closed, some PDUs may still be pending in the tx queue.
813 if the same vcc is immediately re-opened, those pending PDUs must
814 not be popped when their transmission completes, as they refer
815 to the prior incarnation of that vcc. otherwise, sk_atm(vcc)->sk_wmem_alloc
816 would be decremented by the size of the (unrelated) skb, possibly
817 leading to a negative sk->sk_wmem_alloc count, ultimately freezing the vcc.
818 we thus bind the tx entry to the current incarnation of the vcc
819 when the entry is submitted for tx. when the tx later completes,
820 if the incarnation number of the tx entry does not match that
821 of the vcc, the vcc has been closed and then re-opened in the meantime,
822 so we simply drop the skb here. */
824 DPRINTK(1, "vcc closed-then-re-opened; dropping PDU sent on device %d\n",
825 fore200e->atm_dev->number);
827 dev_kfree_skb_any(entry->skb);
833 /* notify tx completion */
835 vcc->pop(vcc, entry->skb);
838 dev_kfree_skb_any(entry->skb);
841 /* check error condition */
842 if (*entry->status & STATUS_ERROR)
843 atomic_inc(&vcc->stats->tx_err);
845 atomic_inc(&vcc->stats->tx);
849 *entry->status = STATUS_FREE;
851 fore200e->host_txq.txing--;
853 FORE200E_NEXT_ENTRY(txq->tail, QUEUE_SIZE_TX);
858 #ifdef FORE200E_BSQ_DEBUG
859 int bsq_audit(int where, struct host_bsq* bsq, int scheme, int magn)
861 struct buffer* buffer;
864 buffer = bsq->freebuf;
867 if (buffer->supplied) {
868 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld supplied but in free list!\n",
869 where, scheme, magn, buffer->index);
872 if (buffer->magn != magn) {
873 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected magn = %d\n",
874 where, scheme, magn, buffer->index, buffer->magn);
877 if (buffer->scheme != scheme) {
878 printk(FORE200E "bsq_audit(%d): queue %d.%d, buffer %ld, unexpected scheme = %d\n",
879 where, scheme, magn, buffer->index, buffer->scheme);
882 if ((buffer->index < 0) || (buffer->index >= fore200e_rx_buf_nbr[ scheme ][ magn ])) {
883 printk(FORE200E "bsq_audit(%d): queue %d.%d, out of range buffer index = %ld !\n",
884 where, scheme, magn, buffer->index);
888 buffer = buffer->next;
891 if (count != bsq->freebuf_count) {
892 printk(FORE200E "bsq_audit(%d): queue %d.%d, %d bufs in free list, but freebuf_count = %d\n",
893 where, scheme, magn, count, bsq->freebuf_count);
901 fore200e_supply(struct fore200e* fore200e)
905 struct host_bsq* bsq;
906 struct host_bsq_entry* entry;
907 struct buffer* buffer;
909 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
910 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
912 bsq = &fore200e->host_bsq[ scheme ][ magn ];
914 #ifdef FORE200E_BSQ_DEBUG
915 bsq_audit(1, bsq, scheme, magn);
917 while (bsq->freebuf_count >= RBD_BLK_SIZE) {
919 DPRINTK(2, "supplying %d rx buffers to queue %d / %d, freebuf_count = %d\n",
920 RBD_BLK_SIZE, scheme, magn, bsq->freebuf_count);
922 entry = &bsq->host_entry[ bsq->head ];
924 for (i = 0; i < RBD_BLK_SIZE; i++) {
926 /* take the first buffer in the free buffer list */
927 buffer = bsq->freebuf;
929 printk(FORE200E "no more free bufs in queue %d.%d, but freebuf_count = %d\n",
930 scheme, magn, bsq->freebuf_count);
933 bsq->freebuf = buffer->next;
935 #ifdef FORE200E_BSQ_DEBUG
936 if (buffer->supplied)
937 printk(FORE200E "queue %d.%d, buffer %lu already supplied\n",
938 scheme, magn, buffer->index);
939 buffer->supplied = 1;
941 entry->rbd_block->rbd[ i ].buffer_haddr = buffer->data.dma_addr;
942 entry->rbd_block->rbd[ i ].handle = FORE200E_BUF2HDL(buffer);
945 FORE200E_NEXT_ENTRY(bsq->head, QUEUE_SIZE_BS);
947 /* decrease accordingly the number of free rx buffers */
948 bsq->freebuf_count -= RBD_BLK_SIZE;
950 *entry->status = STATUS_PENDING;
951 fore200e->bus->write(entry->rbd_block_dma, &entry->cp_entry->rbd_block_haddr);
959 fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rpd)
962 struct buffer* buffer;
963 struct fore200e_vcc* fore200e_vcc;
965 #ifdef FORE200E_52BYTE_AAL0_SDU
971 fore200e_vcc = FORE200E_VCC(vcc);
972 ASSERT(fore200e_vcc);
974 #ifdef FORE200E_52BYTE_AAL0_SDU
975 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.rxtp.max_sdu == ATM_AAL0_SDU)) {
977 cell_header = (rpd->atm_header.gfc << ATM_HDR_GFC_SHIFT) |
978 (rpd->atm_header.vpi << ATM_HDR_VPI_SHIFT) |
979 (rpd->atm_header.vci << ATM_HDR_VCI_SHIFT) |
980 (rpd->atm_header.plt << ATM_HDR_PTI_SHIFT) |
986 /* compute total PDU length */
987 for (i = 0; i < rpd->nseg; i++)
988 pdu_len += rpd->rsd[ i ].length;
990 skb = alloc_skb(pdu_len, GFP_ATOMIC);
992 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
994 atomic_inc(&vcc->stats->rx_drop);
998 __net_timestamp(skb);
1000 #ifdef FORE200E_52BYTE_AAL0_SDU
1002 *((u32*)skb_put(skb, 4)) = cell_header;
1006 /* reassemble segments */
1007 for (i = 0; i < rpd->nseg; i++) {
1009 /* rebuild rx buffer address from rsd handle */
1010 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1012 /* Make device DMA transfer visible to CPU. */
1013 dma_sync_single_for_cpu(fore200e->dev, buffer->data.dma_addr,
1014 rpd->rsd[i].length, DMA_FROM_DEVICE);
1016 skb_put_data(skb, buffer->data.align_addr, rpd->rsd[i].length);
1018 /* Now let the device get at it again. */
1019 dma_sync_single_for_device(fore200e->dev, buffer->data.dma_addr,
1020 rpd->rsd[i].length, DMA_FROM_DEVICE);
1023 DPRINTK(3, "rx skb: len = %d, truesize = %d\n", skb->len, skb->truesize);
1025 if (pdu_len < fore200e_vcc->rx_min_pdu)
1026 fore200e_vcc->rx_min_pdu = pdu_len;
1027 if (pdu_len > fore200e_vcc->rx_max_pdu)
1028 fore200e_vcc->rx_max_pdu = pdu_len;
1029 fore200e_vcc->rx_pdu++;
1032 if (atm_charge(vcc, skb->truesize) == 0) {
1034 DPRINTK(2, "receive buffers saturated for %d.%d.%d - PDU dropped\n",
1035 vcc->itf, vcc->vpi, vcc->vci);
1037 dev_kfree_skb_any(skb);
1039 atomic_inc(&vcc->stats->rx_drop);
1043 vcc->push(vcc, skb);
1044 atomic_inc(&vcc->stats->rx);
1051 fore200e_collect_rpd(struct fore200e* fore200e, struct rpd* rpd)
1053 struct host_bsq* bsq;
1054 struct buffer* buffer;
1057 for (i = 0; i < rpd->nseg; i++) {
1059 /* rebuild rx buffer address from rsd handle */
1060 buffer = FORE200E_HDL2BUF(rpd->rsd[ i ].handle);
1062 bsq = &fore200e->host_bsq[ buffer->scheme ][ buffer->magn ];
1064 #ifdef FORE200E_BSQ_DEBUG
1065 bsq_audit(2, bsq, buffer->scheme, buffer->magn);
1067 if (buffer->supplied == 0)
1068 printk(FORE200E "queue %d.%d, buffer %ld was not supplied\n",
1069 buffer->scheme, buffer->magn, buffer->index);
1070 buffer->supplied = 0;
1073 /* re-insert the buffer into the free buffer list */
1074 buffer->next = bsq->freebuf;
1075 bsq->freebuf = buffer;
1077 /* then increment the number of free rx buffers */
1078 bsq->freebuf_count++;
1084 fore200e_rx_irq(struct fore200e* fore200e)
1086 struct host_rxq* rxq = &fore200e->host_rxq;
1087 struct host_rxq_entry* entry;
1088 struct atm_vcc* vcc;
1089 struct fore200e_vc_map* vc_map;
1093 entry = &rxq->host_entry[ rxq->head ];
1095 /* no more received PDUs */
1096 if ((*entry->status & STATUS_COMPLETE) == 0)
1099 vc_map = FORE200E_VC_MAP(fore200e, entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1101 if ((vc_map->vcc == NULL) ||
1102 (test_bit(ATM_VF_READY, &vc_map->vcc->flags) == 0)) {
1104 DPRINTK(1, "no ready VC found for PDU received on %d.%d.%d\n",
1105 fore200e->atm_dev->number,
1106 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1112 if ((*entry->status & STATUS_ERROR) == 0) {
1114 fore200e_push_rpd(fore200e, vcc, entry->rpd);
1117 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
1118 fore200e->atm_dev->number,
1119 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
1120 atomic_inc(&vcc->stats->rx_err);
1124 FORE200E_NEXT_ENTRY(rxq->head, QUEUE_SIZE_RX);
1126 fore200e_collect_rpd(fore200e, entry->rpd);
1128 /* rewrite the rpd address to ack the received PDU */
1129 fore200e->bus->write(entry->rpd_dma, &entry->cp_entry->rpd_haddr);
1130 *entry->status = STATUS_FREE;
1132 fore200e_supply(fore200e);
1137 #ifndef FORE200E_USE_TASKLET
1139 fore200e_irq(struct fore200e* fore200e)
1141 unsigned long flags;
1143 spin_lock_irqsave(&fore200e->q_lock, flags);
1144 fore200e_rx_irq(fore200e);
1145 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1147 spin_lock_irqsave(&fore200e->q_lock, flags);
1148 fore200e_tx_irq(fore200e);
1149 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1155 fore200e_interrupt(int irq, void* dev)
1157 struct fore200e* fore200e = FORE200E_DEV((struct atm_dev*)dev);
1159 if (fore200e->bus->irq_check(fore200e) == 0) {
1161 DPRINTK(3, "interrupt NOT triggered by device %d\n", fore200e->atm_dev->number);
1164 DPRINTK(3, "interrupt triggered by device %d\n", fore200e->atm_dev->number);
1166 #ifdef FORE200E_USE_TASKLET
1167 tasklet_schedule(&fore200e->tx_tasklet);
1168 tasklet_schedule(&fore200e->rx_tasklet);
1170 fore200e_irq(fore200e);
1173 fore200e->bus->irq_ack(fore200e);
1178 #ifdef FORE200E_USE_TASKLET
1180 fore200e_tx_tasklet(unsigned long data)
1182 struct fore200e* fore200e = (struct fore200e*) data;
1183 unsigned long flags;
1185 DPRINTK(3, "tx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1187 spin_lock_irqsave(&fore200e->q_lock, flags);
1188 fore200e_tx_irq(fore200e);
1189 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1194 fore200e_rx_tasklet(unsigned long data)
1196 struct fore200e* fore200e = (struct fore200e*) data;
1197 unsigned long flags;
1199 DPRINTK(3, "rx tasklet scheduled for device %d\n", fore200e->atm_dev->number);
1201 spin_lock_irqsave(&fore200e->q_lock, flags);
1202 fore200e_rx_irq((struct fore200e*) data);
1203 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1209 fore200e_select_scheme(struct atm_vcc* vcc)
1211 /* balance the VCs fairly across the (identical) buffer schemes */
1212 int scheme = vcc->vci % 2 ? BUFFER_SCHEME_ONE : BUFFER_SCHEME_TWO;
1214 DPRINTK(1, "VC %d.%d.%d uses buffer scheme %d\n",
1215 vcc->itf, vcc->vpi, vcc->vci, scheme);
1222 fore200e_activate_vcin(struct fore200e* fore200e, int activate, struct atm_vcc* vcc, int mtu)
1224 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1225 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1226 struct activate_opcode activ_opcode;
1227 struct deactivate_opcode deactiv_opcode;
1230 enum fore200e_aal aal = fore200e_atm2fore_aal(vcc->qos.aal);
1232 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1235 FORE200E_VCC(vcc)->scheme = fore200e_select_scheme(vcc);
1237 activ_opcode.opcode = OPCODE_ACTIVATE_VCIN;
1238 activ_opcode.aal = aal;
1239 activ_opcode.scheme = FORE200E_VCC(vcc)->scheme;
1240 activ_opcode.pad = 0;
1243 deactiv_opcode.opcode = OPCODE_DEACTIVATE_VCIN;
1244 deactiv_opcode.pad = 0;
1247 vpvc.vci = vcc->vci;
1248 vpvc.vpi = vcc->vpi;
1250 *entry->status = STATUS_PENDING;
1254 #ifdef FORE200E_52BYTE_AAL0_SDU
1257 /* the MTU is not used by the cp, except in the case of AAL0 */
1258 fore200e->bus->write(mtu, &entry->cp_entry->cmd.activate_block.mtu);
1259 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.vpvc);
1260 fore200e->bus->write(*(u32*)&activ_opcode, (u32 __iomem *)&entry->cp_entry->cmd.activate_block.opcode);
1263 fore200e->bus->write(*(u32*)&vpvc, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.vpvc);
1264 fore200e->bus->write(*(u32*)&deactiv_opcode, (u32 __iomem *)&entry->cp_entry->cmd.deactivate_block.opcode);
1267 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1269 *entry->status = STATUS_FREE;
1272 printk(FORE200E "unable to %s VC %d.%d.%d\n",
1273 activate ? "open" : "close", vcc->itf, vcc->vpi, vcc->vci);
1277 DPRINTK(1, "VC %d.%d.%d %sed\n", vcc->itf, vcc->vpi, vcc->vci,
1278 activate ? "open" : "clos");
1284 #define FORE200E_MAX_BACK2BACK_CELLS 255 /* XXX depends on CDVT */
1287 fore200e_rate_ctrl(struct atm_qos* qos, struct tpd_rate* rate)
1289 if (qos->txtp.max_pcr < ATM_OC3_PCR) {
1291 /* compute the data cells to idle cells ratio from the tx PCR */
1292 rate->data_cells = qos->txtp.max_pcr * FORE200E_MAX_BACK2BACK_CELLS / ATM_OC3_PCR;
1293 rate->idle_cells = FORE200E_MAX_BACK2BACK_CELLS - rate->data_cells;
1296 /* disable rate control */
1297 rate->data_cells = rate->idle_cells = 0;
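/* Worked example (editorial, approximate figures): ATM_OC3_PCR is roughly 353207 cells/s.
   A pseudo-CBR VC asking for half of that (max_pcr ~ 176603) gets
   data_cells = 176603 * 255 / 353207 = 127 and idle_cells = 255 - 127 = 128,
   i.e. the adapter interleaves about one idle cell per data cell, halving the
   effective cell rate on the wire. */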
1303 fore200e_open(struct atm_vcc *vcc)
1305 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1306 struct fore200e_vcc* fore200e_vcc;
1307 struct fore200e_vc_map* vc_map;
1308 unsigned long flags;
1310 short vpi = vcc->vpi;
1312 ASSERT((vpi >= 0) && (vpi < 1<<FORE200E_VPI_BITS));
1313 ASSERT((vci >= 0) && (vci < 1<<FORE200E_VCI_BITS));
1315 spin_lock_irqsave(&fore200e->q_lock, flags);
1317 vc_map = FORE200E_VC_MAP(fore200e, vpi, vci);
1320 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1322 printk(FORE200E "VC %d.%d.%d already in use\n",
1323 fore200e->atm_dev->number, vpi, vci);
1330 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1332 fore200e_vcc = kzalloc(sizeof(struct fore200e_vcc), GFP_ATOMIC);
1333 if (fore200e_vcc == NULL) {
1338 DPRINTK(2, "opening %d.%d.%d:%d QoS = (tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1339 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d)\n",
1340 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1341 fore200e_traffic_class[ vcc->qos.txtp.traffic_class ],
1342 vcc->qos.txtp.min_pcr, vcc->qos.txtp.max_pcr, vcc->qos.txtp.max_cdv, vcc->qos.txtp.max_sdu,
1343 fore200e_traffic_class[ vcc->qos.rxtp.traffic_class ],
1344 vcc->qos.rxtp.min_pcr, vcc->qos.rxtp.max_pcr, vcc->qos.rxtp.max_cdv, vcc->qos.rxtp.max_sdu);
1346 /* pseudo-CBR bandwidth requested? */
1347 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1349 mutex_lock(&fore200e->rate_mtx);
1350 if (fore200e->available_cell_rate < vcc->qos.txtp.max_pcr) {
1351 mutex_unlock(&fore200e->rate_mtx);
1353 kfree(fore200e_vcc);
1358 /* reserve bandwidth */
1359 fore200e->available_cell_rate -= vcc->qos.txtp.max_pcr;
1360 mutex_unlock(&fore200e->rate_mtx);
1363 vcc->itf = vcc->dev->number;
1365 set_bit(ATM_VF_PARTIAL,&vcc->flags);
1366 set_bit(ATM_VF_ADDR, &vcc->flags);
1368 vcc->dev_data = fore200e_vcc;
1370 if (fore200e_activate_vcin(fore200e, 1, vcc, vcc->qos.rxtp.max_sdu) < 0) {
1374 clear_bit(ATM_VF_ADDR, &vcc->flags);
1375 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1377 vcc->dev_data = NULL;
1379 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1381 kfree(fore200e_vcc);
1385 /* compute rate control parameters */
1386 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1388 fore200e_rate_ctrl(&vcc->qos, &fore200e_vcc->rate);
1389 set_bit(ATM_VF_HASQOS, &vcc->flags);
1391 DPRINTK(3, "tx on %d.%d.%d:%d, tx PCR = %d, rx PCR = %d, data_cells = %u, idle_cells = %u\n",
1392 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1393 vcc->qos.txtp.max_pcr, vcc->qos.rxtp.max_pcr,
1394 fore200e_vcc->rate.data_cells, fore200e_vcc->rate.idle_cells);
1397 fore200e_vcc->tx_min_pdu = fore200e_vcc->rx_min_pdu = MAX_PDU_SIZE + 1;
1398 fore200e_vcc->tx_max_pdu = fore200e_vcc->rx_max_pdu = 0;
1399 fore200e_vcc->tx_pdu = fore200e_vcc->rx_pdu = 0;
1401 /* new incarnation of the vcc */
1402 vc_map->incarn = ++fore200e->incarn_count;
1404 /* VC unusable before this flag is set */
1405 set_bit(ATM_VF_READY, &vcc->flags);
1412 fore200e_close(struct atm_vcc* vcc)
1414 struct fore200e_vcc* fore200e_vcc;
1415 struct fore200e* fore200e;
1416 struct fore200e_vc_map* vc_map;
1417 unsigned long flags;
1420 fore200e = FORE200E_DEV(vcc->dev);
1422 ASSERT((vcc->vpi >= 0) && (vcc->vpi < 1<<FORE200E_VPI_BITS));
1423 ASSERT((vcc->vci >= 0) && (vcc->vci < 1<<FORE200E_VCI_BITS));
1425 DPRINTK(2, "closing %d.%d.%d:%d\n", vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal));
1427 clear_bit(ATM_VF_READY, &vcc->flags);
1429 fore200e_activate_vcin(fore200e, 0, vcc, 0);
1431 spin_lock_irqsave(&fore200e->q_lock, flags);
1433 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1435 /* the vc is no longer considered "in use" by fore200e_open() */
1438 vcc->itf = vcc->vci = vcc->vpi = 0;
1440 fore200e_vcc = FORE200E_VCC(vcc);
1441 vcc->dev_data = NULL;
1443 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1445 /* release reserved bandwidth, if any */
1446 if ((vcc->qos.txtp.traffic_class == ATM_CBR) && (vcc->qos.txtp.max_pcr > 0)) {
1448 mutex_lock(&fore200e->rate_mtx);
1449 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1450 mutex_unlock(&fore200e->rate_mtx);
1452 clear_bit(ATM_VF_HASQOS, &vcc->flags);
1455 clear_bit(ATM_VF_ADDR, &vcc->flags);
1456 clear_bit(ATM_VF_PARTIAL,&vcc->flags);
1458 ASSERT(fore200e_vcc);
1459 kfree(fore200e_vcc);
1464 fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
1466 struct fore200e* fore200e;
1467 struct fore200e_vcc* fore200e_vcc;
1468 struct fore200e_vc_map* vc_map;
1469 struct host_txq* txq;
1470 struct host_txq_entry* entry;
1472 struct tpd_haddr tpd_haddr;
1473 int retry = CONFIG_ATM_FORE200E_TX_RETRY;
1475 int tx_len = skb->len;
1476 u32* cell_header = NULL;
1477 unsigned char* skb_data;
1479 unsigned char* data;
1480 unsigned long flags;
1485 fore200e = FORE200E_DEV(vcc->dev);
1486 fore200e_vcc = FORE200E_VCC(vcc);
1491 txq = &fore200e->host_txq;
1495 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1496 DPRINTK(1, "VC %d.%d.%d not ready for tx\n", vcc->itf, vcc->vpi, vcc->vci);
1497 dev_kfree_skb_any(skb);
1501 #ifdef FORE200E_52BYTE_AAL0_SDU
1502 if ((vcc->qos.aal == ATM_AAL0) && (vcc->qos.txtp.max_sdu == ATM_AAL0_SDU)) {
1503 cell_header = (u32*) skb->data;
1504 skb_data = skb->data + 4; /* skip 4-byte cell header */
1505 skb_len = tx_len = skb->len - 4;
1507 DPRINTK(3, "user-supplied cell header = 0x%08x\n", *cell_header);
1512 skb_data = skb->data;
1516 if (((unsigned long)skb_data) & 0x3) {
1518 DPRINTK(2, "misaligned tx PDU on device %s\n", fore200e->name);
1523 if ((vcc->qos.aal == ATM_AAL0) && (skb_len % ATM_CELL_PAYLOAD)) {
1525 /* this simply NUKES the PCA board */
1526 DPRINTK(2, "incomplete tx AAL0 PDU on device %s\n", fore200e->name);
1528 tx_len = ((skb_len / ATM_CELL_PAYLOAD) + 1) * ATM_CELL_PAYLOAD;
1532 data = kmalloc(tx_len, GFP_ATOMIC);
1538 dev_kfree_skb_any(skb);
1543 memcpy(data, skb_data, skb_len);
1544 if (skb_len < tx_len)
1545 memset(data + skb_len, 0x00, tx_len - skb_len);
1551 vc_map = FORE200E_VC_MAP(fore200e, vcc->vpi, vcc->vci);
1552 ASSERT(vc_map->vcc == vcc);
1556 spin_lock_irqsave(&fore200e->q_lock, flags);
1558 entry = &txq->host_entry[ txq->head ];
1560 if ((*entry->status != STATUS_FREE) || (txq->txing >= QUEUE_SIZE_TX - 2)) {
1562 /* try to free completed tx queue entries */
1563 fore200e_tx_irq(fore200e);
1565 if (*entry->status != STATUS_FREE) {
1567 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1569 /* retry once again? */
1575 atomic_inc(&vcc->stats->tx_err);
1578 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
1579 fore200e->name, fore200e->cp_queues->heartbeat);
1584 dev_kfree_skb_any(skb);
1594 entry->incarn = vc_map->incarn;
1595 entry->vc_map = vc_map;
1597 entry->data = tx_copy ? data : NULL;
1600 tpd->tsd[ 0 ].buffer = dma_map_single(fore200e->dev, data, tx_len,
1602 if (dma_mapping_error(fore200e->dev, tpd->tsd[0].buffer)) {
1605 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1608 tpd->tsd[ 0 ].length = tx_len;
1610 FORE200E_NEXT_ENTRY(txq->head, QUEUE_SIZE_TX);
1613 /* The dma_map call above implies a dma_sync so the device can use it,
1614 * thus no explicit dma_sync call is necessary here.
1617 DPRINTK(3, "tx on %d.%d.%d:%d, len = %u (%u)\n",
1618 vcc->itf, vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
1619 tpd->tsd[0].length, skb_len);
1621 if (skb_len < fore200e_vcc->tx_min_pdu)
1622 fore200e_vcc->tx_min_pdu = skb_len;
1623 if (skb_len > fore200e_vcc->tx_max_pdu)
1624 fore200e_vcc->tx_max_pdu = skb_len;
1625 fore200e_vcc->tx_pdu++;
1627 /* set tx rate control information */
1628 tpd->rate.data_cells = fore200e_vcc->rate.data_cells;
1629 tpd->rate.idle_cells = fore200e_vcc->rate.idle_cells;
1632 tpd->atm_header.clp = (*cell_header & ATM_HDR_CLP);
1633 tpd->atm_header.plt = (*cell_header & ATM_HDR_PTI_MASK) >> ATM_HDR_PTI_SHIFT;
1634 tpd->atm_header.vci = (*cell_header & ATM_HDR_VCI_MASK) >> ATM_HDR_VCI_SHIFT;
1635 tpd->atm_header.vpi = (*cell_header & ATM_HDR_VPI_MASK) >> ATM_HDR_VPI_SHIFT;
1636 tpd->atm_header.gfc = (*cell_header & ATM_HDR_GFC_MASK) >> ATM_HDR_GFC_SHIFT;
1639 /* set the ATM header, common to all cells conveying the PDU */
1640 tpd->atm_header.clp = 0;
1641 tpd->atm_header.plt = 0;
1642 tpd->atm_header.vci = vcc->vci;
1643 tpd->atm_header.vpi = vcc->vpi;
1644 tpd->atm_header.gfc = 0;
1647 tpd->spec.length = tx_len;
1649 tpd->spec.aal = fore200e_atm2fore_aal(vcc->qos.aal);
1652 tpd_haddr.size = sizeof(struct tpd) / (1<<TPD_HADDR_SHIFT); /* size is expressed in 32-byte blocks */
1654 tpd_haddr.haddr = entry->tpd_dma >> TPD_HADDR_SHIFT; /* shift the address, as we are in a bitfield */
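/* Note (editorial): tpds are allocated with descr_alignment (32 bytes in both the PCA and
   SBA bus ops above), so the low-order bits dropped by TPD_HADDR_SHIFT are zero and the cp
   can reconstruct the full DMA address as haddr << TPD_HADDR_SHIFT; likewise 'size' counts
   32-byte blocks, which is why sizeof(struct tpd) is divided by (1 << TPD_HADDR_SHIFT). */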
1656 *entry->status = STATUS_PENDING;
1657 fore200e->bus->write(*(u32*)&tpd_haddr, (u32 __iomem *)&entry->cp_entry->tpd_haddr);
1659 spin_unlock_irqrestore(&fore200e->q_lock, flags);
1666 fore200e_getstats(struct fore200e* fore200e)
1668 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1669 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1670 struct stats_opcode opcode;
1674 if (fore200e->stats == NULL) {
1675 fore200e->stats = kzalloc(sizeof(struct stats), GFP_KERNEL);
1676 if (fore200e->stats == NULL)
1680 stats_dma_addr = dma_map_single(fore200e->dev, fore200e->stats,
1681 sizeof(struct stats), DMA_FROM_DEVICE);
1682 if (dma_mapping_error(fore200e->dev, stats_dma_addr))
1685 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1687 opcode.opcode = OPCODE_GET_STATS;
1690 fore200e->bus->write(stats_dma_addr, &entry->cp_entry->cmd.stats_block.stats_haddr);
1692 *entry->status = STATUS_PENDING;
1694 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.stats_block.opcode);
1696 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1698 *entry->status = STATUS_FREE;
1700 dma_unmap_single(fore200e->dev, stats_dma_addr, sizeof(struct stats), DMA_FROM_DEVICE);
1703 printk(FORE200E "unable to get statistics from device %s\n", fore200e->name);
1710 #if 0 /* currently unused */
1712 fore200e_get_oc3(struct fore200e* fore200e, struct oc3_regs* regs)
1714 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1715 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1716 struct oc3_opcode opcode;
1718 u32 oc3_regs_dma_addr;
1720 oc3_regs_dma_addr = fore200e->bus->dma_map(fore200e, regs, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1722 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1724 opcode.opcode = OPCODE_GET_OC3;
1729 fore200e->bus->write(oc3_regs_dma_addr, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1731 *entry->status = STATUS_PENDING;
1733 fore200e->bus->write(*(u32*)&opcode, (u32*)&entry->cp_entry->cmd.oc3_block.opcode);
1735 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1737 *entry->status = STATUS_FREE;
1739 fore200e->bus->dma_unmap(fore200e, oc3_regs_dma_addr, sizeof(struct oc3_regs), DMA_FROM_DEVICE);
1742 printk(FORE200E "unable to get OC-3 regs of device %s\n", fore200e->name);
1752 fore200e_set_oc3(struct fore200e* fore200e, u32 reg, u32 value, u32 mask)
1754 struct host_cmdq* cmdq = &fore200e->host_cmdq;
1755 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ];
1756 struct oc3_opcode opcode;
1759 DPRINTK(2, "set OC-3 reg = 0x%02x, value = 0x%02x, mask = 0x%02x\n", reg, value, mask);
1761 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD);
1763 opcode.opcode = OPCODE_SET_OC3;
1765 opcode.value = value;
1768 fore200e->bus->write(0, &entry->cp_entry->cmd.oc3_block.regs_haddr);
1770 *entry->status = STATUS_PENDING;
1772 fore200e->bus->write(*(u32*)&opcode, (u32 __iomem *)&entry->cp_entry->cmd.oc3_block.opcode);
1774 ok = fore200e_poll(fore200e, entry->status, STATUS_COMPLETE, 400);
1776 *entry->status = STATUS_FREE;
1779 printk(FORE200E "unable to set OC-3 reg 0x%02x of device %s\n", reg, fore200e->name);
1788 fore200e_setloop(struct fore200e* fore200e, int loop_mode)
1790 u32 mct_value, mct_mask;
1793 if (!capable(CAP_NET_ADMIN))
1796 switch (loop_mode) {
1800 mct_mask = SUNI_MCT_DLE | SUNI_MCT_LLE;
1803 case ATM_LM_LOC_PHY:
1804 mct_value = mct_mask = SUNI_MCT_DLE;
1807 case ATM_LM_RMT_PHY:
1808 mct_value = mct_mask = SUNI_MCT_LLE;
1815 error = fore200e_set_oc3(fore200e, SUNI_MCT, mct_value, mct_mask);
1817 fore200e->loop_mode = loop_mode;
1824 fore200e_fetch_stats(struct fore200e* fore200e, struct sonet_stats __user *arg)
1826 struct sonet_stats tmp;
1828 if (fore200e_getstats(fore200e) < 0)
1831 tmp.section_bip = be32_to_cpu(fore200e->stats->oc3.section_bip8_errors);
1832 tmp.line_bip = be32_to_cpu(fore200e->stats->oc3.line_bip24_errors);
1833 tmp.path_bip = be32_to_cpu(fore200e->stats->oc3.path_bip8_errors);
1834 tmp.line_febe = be32_to_cpu(fore200e->stats->oc3.line_febe_errors);
1835 tmp.path_febe = be32_to_cpu(fore200e->stats->oc3.path_febe_errors);
1836 tmp.corr_hcs = be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors);
1837 tmp.uncorr_hcs = be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors);
1838 tmp.tx_cells = be32_to_cpu(fore200e->stats->aal0.cells_transmitted) +
1839 be32_to_cpu(fore200e->stats->aal34.cells_transmitted) +
1840 be32_to_cpu(fore200e->stats->aal5.cells_transmitted);
1841 tmp.rx_cells = be32_to_cpu(fore200e->stats->aal0.cells_received) +
1842 be32_to_cpu(fore200e->stats->aal34.cells_received) +
1843 be32_to_cpu(fore200e->stats->aal5.cells_received);
1846 return copy_to_user(arg, &tmp, sizeof(struct sonet_stats)) ? -EFAULT : 0;
1853 fore200e_ioctl(struct atm_dev* dev, unsigned int cmd, void __user * arg)
1855 struct fore200e* fore200e = FORE200E_DEV(dev);
1857 DPRINTK(2, "ioctl cmd = 0x%x (%u), arg = 0x%p (%lu)\n", cmd, cmd, arg, (unsigned long)arg);
1862 return fore200e_fetch_stats(fore200e, (struct sonet_stats __user *)arg);
1865 return put_user(0, (int __user *)arg) ? -EFAULT : 0;
1868 return fore200e_setloop(fore200e, (int)(unsigned long)arg);
1871 return put_user(fore200e->loop_mode, (int __user *)arg) ? -EFAULT : 0;
1874 return put_user(ATM_LM_LOC_PHY | ATM_LM_RMT_PHY, (int __user *)arg) ? -EFAULT : 0;
1877 return -ENOSYS; /* not implemented */
1882 fore200e_change_qos(struct atm_vcc* vcc,struct atm_qos* qos, int flags)
1884 struct fore200e_vcc* fore200e_vcc = FORE200E_VCC(vcc);
1885 struct fore200e* fore200e = FORE200E_DEV(vcc->dev);
1887 if (!test_bit(ATM_VF_READY, &vcc->flags)) {
1888 DPRINTK(1, "VC %d.%d.%d not ready for QoS change\n", vcc->itf, vcc->vpi, vcc->vci);
1892 DPRINTK(2, "change_qos %d.%d.%d, "
1893 "(tx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d; "
1894 "rx: cl=%s, pcr=%d-%d, cdv=%d, max_sdu=%d), flags = 0x%x\n"
1895 "available_cell_rate = %u",
1896 vcc->itf, vcc->vpi, vcc->vci,
1897 fore200e_traffic_class[ qos->txtp.traffic_class ],
1898 qos->txtp.min_pcr, qos->txtp.max_pcr, qos->txtp.max_cdv, qos->txtp.max_sdu,
1899 fore200e_traffic_class[ qos->rxtp.traffic_class ],
1900 qos->rxtp.min_pcr, qos->rxtp.max_pcr, qos->rxtp.max_cdv, qos->rxtp.max_sdu,
1901 flags, fore200e->available_cell_rate);
1903 if ((qos->txtp.traffic_class == ATM_CBR) && (qos->txtp.max_pcr > 0)) {
1905 mutex_lock(&fore200e->rate_mtx);
1906 if (fore200e->available_cell_rate + vcc->qos.txtp.max_pcr < qos->txtp.max_pcr) {
1907 mutex_unlock(&fore200e->rate_mtx);
1911 fore200e->available_cell_rate += vcc->qos.txtp.max_pcr;
1912 fore200e->available_cell_rate -= qos->txtp.max_pcr;
1914 mutex_unlock(&fore200e->rate_mtx);
1916 memcpy(&vcc->qos, qos, sizeof(struct atm_qos));
1918 /* update rate control parameters */
1919 fore200e_rate_ctrl(qos, &fore200e_vcc->rate);
1921 set_bit(ATM_VF_HASQOS, &vcc->flags);
1930 static int fore200e_irq_request(struct fore200e *fore200e)
1932 if (request_irq(fore200e->irq, fore200e_interrupt, IRQF_SHARED, fore200e->name, fore200e->atm_dev) < 0) {
1934 printk(FORE200E "unable to reserve IRQ %s for device %s\n",
1935 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1939 printk(FORE200E "IRQ %s reserved for device %s\n",
1940 fore200e_irq_itoa(fore200e->irq), fore200e->name);
1942 #ifdef FORE200E_USE_TASKLET
1943 tasklet_init(&fore200e->tx_tasklet, fore200e_tx_tasklet, (unsigned long)fore200e);
1944 tasklet_init(&fore200e->rx_tasklet, fore200e_rx_tasklet, (unsigned long)fore200e);
1947 fore200e->state = FORE200E_STATE_IRQ;
1952 static int fore200e_get_esi(struct fore200e *fore200e)
1954 struct prom_data* prom = kzalloc(sizeof(struct prom_data), GFP_KERNEL);
1960 ok = fore200e->bus->prom_read(fore200e, prom);
1966 printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
1968 (prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
1969 prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
1971 for (i = 0; i < ESI_LEN; i++) {
1972 fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
1981 static int fore200e_alloc_rx_buf(struct fore200e *fore200e)
1983 int scheme, magn, nbr, size, i;
1985 struct host_bsq* bsq;
1986 struct buffer* buffer;
1988 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
1989 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
1991 bsq = &fore200e->host_bsq[ scheme ][ magn ];
1993 nbr = fore200e_rx_buf_nbr[ scheme ][ magn ];
1994 size = fore200e_rx_buf_size[ scheme ][ magn ];
1996 DPRINTK(2, "rx buffers %d / %d are being allocated\n", scheme, magn);
1998 /* allocate the array of receive buffers */
1999 buffer = bsq->buffer = kcalloc(nbr, sizeof(struct buffer),
2005 bsq->freebuf = NULL;
2007 for (i = 0; i < nbr; i++) {
2009 buffer[ i ].scheme = scheme;
2010 buffer[ i ].magn = magn;
2011 #ifdef FORE200E_BSQ_DEBUG
2012 buffer[ i ].index = i;
2013 buffer[ i ].supplied = 0;
2016 /* allocate the receive buffer body */
2017 if (fore200e_chunk_alloc(fore200e,
2018 &buffer[ i ].data, size, fore200e->bus->buffer_alignment,
2019 DMA_FROM_DEVICE) < 0) {
2022 fore200e_chunk_free(fore200e, &buffer[ --i ].data);
2028 /* insert the buffer into the free buffer list */
2029 buffer[ i ].next = bsq->freebuf;
2030 bsq->freebuf = &buffer[ i ];
2032 /* all the buffers are free, initially */
2033 bsq->freebuf_count = nbr;
2035 #ifdef FORE200E_BSQ_DEBUG
2036 bsq_audit(3, bsq, scheme, magn);
2041 fore200e->state = FORE200E_STATE_ALLOC_BUF;
2046 static int fore200e_init_bs_queue(struct fore200e *fore200e)
2048 int scheme, magn, i;
2050 struct host_bsq* bsq;
2051 struct cp_bsq_entry __iomem * cp_entry;
2053 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++) {
2054 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++) {
2056 DPRINTK(2, "buffer supply queue %d / %d is being initialized\n", scheme, magn);
2058 bsq = &fore200e->host_bsq[ scheme ][ magn ];
2060 /* allocate and align the array of status words */
2061 if (fore200e_dma_chunk_alloc(fore200e,
2063 sizeof(enum status),
2065 fore200e->bus->status_alignment) < 0) {
2069 /* allocate and align the array of receive buffer descriptors */
2070 if (fore200e_dma_chunk_alloc(fore200e,
2072 sizeof(struct rbd_block),
2074 fore200e->bus->descr_alignment) < 0) {
2076 fore200e_dma_chunk_free(fore200e, &bsq->status);
2080 /* get the base address of the cp resident buffer supply queue entries */
2081 cp_entry = fore200e->virt_base +
2082 fore200e->bus->read(&fore200e->cp_queues->cp_bsq[ scheme ][ magn ]);
2084 /* fill the host resident and cp resident buffer supply queue entries */
2085 for (i = 0; i < QUEUE_SIZE_BS; i++) {
2087 bsq->host_entry[ i ].status =
2088 FORE200E_INDEX(bsq->status.align_addr, enum status, i);
2089 bsq->host_entry[ i ].rbd_block =
2090 FORE200E_INDEX(bsq->rbd_block.align_addr, struct rbd_block, i);
2091 bsq->host_entry[ i ].rbd_block_dma =
2092 FORE200E_DMA_INDEX(bsq->rbd_block.dma_addr, struct rbd_block, i);
2093 bsq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2095 *bsq->host_entry[ i ].status = STATUS_FREE;
2097 fore200e->bus->write(FORE200E_DMA_INDEX(bsq->status.dma_addr, enum status, i),
2098 &cp_entry[ i ].status_haddr);
2103 fore200e->state = FORE200E_STATE_INIT_BSQ;
2108 static int fore200e_init_rx_queue(struct fore200e *fore200e)
2110 struct host_rxq* rxq = &fore200e->host_rxq;
2111 struct cp_rxq_entry __iomem * cp_entry;
2114 DPRINTK(2, "receive queue is being initialized\n");
2116 /* allocate and align the array of status words */
2117 if (fore200e_dma_chunk_alloc(fore200e,
2119 sizeof(enum status),
2121 fore200e->bus->status_alignment) < 0) {
2125 /* allocate and align the array of receive PDU descriptors */
2126 if (fore200e_dma_chunk_alloc(fore200e,
2130 fore200e->bus->descr_alignment) < 0) {
2132 fore200e_dma_chunk_free(fore200e, &rxq->status);
2136 /* get the base address of the cp resident rx queue entries */
2137 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_rxq);
2139 /* fill the host resident and cp resident rx entries */
2140 for (i=0; i < QUEUE_SIZE_RX; i++) {
2142 rxq->host_entry[ i ].status =
2143 FORE200E_INDEX(rxq->status.align_addr, enum status, i);
2144 rxq->host_entry[ i ].rpd =
2145 FORE200E_INDEX(rxq->rpd.align_addr, struct rpd, i);
2146 rxq->host_entry[ i ].rpd_dma =
2147 FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i);
2148 rxq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2150 *rxq->host_entry[ i ].status = STATUS_FREE;
2152 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->status.dma_addr, enum status, i),
2153 &cp_entry[ i ].status_haddr);
2155 fore200e->bus->write(FORE200E_DMA_INDEX(rxq->rpd.dma_addr, struct rpd, i),
2156 &cp_entry[ i ].rpd_haddr);
2159 /* set the head entry of the queue */
2162 fore200e->state = FORE200E_STATE_INIT_RXQ;
2167 static int fore200e_init_tx_queue(struct fore200e *fore200e)
2169 struct host_txq* txq = &fore200e->host_txq;
2170 struct cp_txq_entry __iomem * cp_entry;
2173 DPRINTK(2, "transmit queue is being initialized\n");
2175 /* allocate and align the array of status words */
2176 if (fore200e_dma_chunk_alloc(fore200e,
2178 sizeof(enum status),
2180 fore200e->bus->status_alignment) < 0) {
2184 /* allocate and align the array of transmit PDU descriptors */
2185 if (fore200e_dma_chunk_alloc(fore200e,
2189 fore200e->bus->descr_alignment) < 0) {
2191 fore200e_dma_chunk_free(fore200e, &txq->status);
2195 /* get the base address of the cp resident tx queue entries */
2196 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_txq);
2198 /* fill the host resident and cp resident tx entries */
2199 for (i=0; i < QUEUE_SIZE_TX; i++) {
2201 txq->host_entry[ i ].status =
2202 FORE200E_INDEX(txq->status.align_addr, enum status, i);
2203 txq->host_entry[ i ].tpd =
2204 FORE200E_INDEX(txq->tpd.align_addr, struct tpd, i);
2205 txq->host_entry[ i ].tpd_dma =
2206 FORE200E_DMA_INDEX(txq->tpd.dma_addr, struct tpd, i);
2207 txq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2209 *txq->host_entry[ i ].status = STATUS_FREE;
2211 fore200e->bus->write(FORE200E_DMA_INDEX(txq->status.dma_addr, enum status, i),
2212 &cp_entry[ i ].status_haddr);
2214 /* although there is a one-to-one mapping of tx queue entries and tpds,
2215 we do not write the DMA (physical) base address of each tpd into the
2216 related cp resident entry here, because the cp relies on this write
2217 operation to detect that a new pdu has been submitted for tx */
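/* Cross-reference (editorial): the tpd DMA address is instead posted on every send, in
   fore200e_send(), where writing tpd_haddr to entry->cp_entry->tpd_haddr (after setting
   *entry->status = STATUS_PENDING) is what notifies the cp of the new PDU. */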
2220 /* set the head and tail entries of the queue */
2224 fore200e->state = FORE200E_STATE_INIT_TXQ;
2229 static int fore200e_init_cmd_queue(struct fore200e *fore200e)
2231 struct host_cmdq* cmdq = &fore200e->host_cmdq;
2232 struct cp_cmdq_entry __iomem * cp_entry;
2235 DPRINTK(2, "command queue is being initialized\n");
2237 /* allocate and align the array of status words */
2238 if (fore200e_dma_chunk_alloc(fore200e,
2240 sizeof(enum status),
2242 fore200e->bus->status_alignment) < 0) {
2246 /* get the base address of the cp resident cmd queue entries */
2247 cp_entry = fore200e->virt_base + fore200e->bus->read(&fore200e->cp_queues->cp_cmdq);
2249 /* fill the host resident and cp resident cmd entries */
2250 for (i=0; i < QUEUE_SIZE_CMD; i++) {
2252 cmdq->host_entry[ i ].status =
2253 FORE200E_INDEX(cmdq->status.align_addr, enum status, i);
2254 cmdq->host_entry[ i ].cp_entry = &cp_entry[ i ];
2256 *cmdq->host_entry[ i ].status = STATUS_FREE;
2258 fore200e->bus->write(FORE200E_DMA_INDEX(cmdq->status.dma_addr, enum status, i),
2259 &cp_entry[ i ].status_haddr);
2262 /* set the head entry of the queue */
2265 fore200e->state = FORE200E_STATE_INIT_CMDQ;
2270 static void fore200e_param_bs_queue(struct fore200e *fore200e,
2271 enum buffer_scheme scheme,
2272 enum buffer_magn magn, int queue_length,
2273 int pool_size, int supply_blksize)
2275 struct bs_spec __iomem * bs_spec = &fore200e->cp_queues->init.bs_spec[ scheme ][ magn ];
2277 fore200e->bus->write(queue_length, &bs_spec->queue_length);
2278 fore200e->bus->write(fore200e_rx_buf_size[ scheme ][ magn ], &bs_spec->buffer_size);
2279 fore200e->bus->write(pool_size, &bs_spec->pool_size);
2280 fore200e->bus->write(supply_blksize, &bs_spec->supply_blksize);
2284 static int fore200e_initialize(struct fore200e *fore200e)
2286 struct cp_queues __iomem * cpq;
2287 int ok, scheme, magn;
2289 DPRINTK(2, "device %s being initialized\n", fore200e->name);
2291 mutex_init(&fore200e->rate_mtx);
2292 spin_lock_init(&fore200e->q_lock);
2294 cpq = fore200e->cp_queues = fore200e->virt_base + FORE200E_CP_QUEUES_OFFSET;
2296 /* enable cp to host interrupts */
2297 fore200e->bus->write(1, &cpq->imask);
2299 if (fore200e->bus->irq_enable)
2300 fore200e->bus->irq_enable(fore200e);
2302 fore200e->bus->write(NBR_CONNECT, &cpq->init.num_connect);
2304 fore200e->bus->write(QUEUE_SIZE_CMD, &cpq->init.cmd_queue_len);
2305 fore200e->bus->write(QUEUE_SIZE_RX, &cpq->init.rx_queue_len);
2306 fore200e->bus->write(QUEUE_SIZE_TX, &cpq->init.tx_queue_len);
2308 fore200e->bus->write(RSD_EXTENSION, &cpq->init.rsd_extension);
2309 fore200e->bus->write(TSD_EXTENSION, &cpq->init.tsd_extension);
2311 for (scheme = 0; scheme < BUFFER_SCHEME_NBR; scheme++)
2312 for (magn = 0; magn < BUFFER_MAGN_NBR; magn++)
2313 fore200e_param_bs_queue(fore200e, scheme, magn,
2314 QUEUE_SIZE_BS,
2315 fore200e_rx_buf_nbr[ scheme ][ magn ],
2316 RBD_BLK_SIZE);
2318 /* issue the initialize command */
2319 fore200e->bus->write(STATUS_PENDING, &cpq->init.status);
2320 fore200e->bus->write(OPCODE_INITIALIZE, &cpq->init.opcode);
2322 ok = fore200e_io_poll(fore200e, &cpq->init.status, STATUS_COMPLETE, 3000);
2323 if (ok == 0) {
2324 printk(FORE200E "device %s initialization failed\n", fore200e->name);
2325 return -ENODEV;
2326 }
2328 printk(FORE200E "device %s initialized\n", fore200e->name);
2330 fore200e->state = FORE200E_STATE_INITIALIZE;
2331 return 0;
2332 }
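#if 0 /* illustrative sketch only: fore200e_io_poll is defined earlier in this
         file; something equivalent to the loop below is assumed, polling a
         cp-resident word until it reads the expected value (returning 1) or
         the timeout in milliseconds expires (returning 0) */
static int fore200e_io_poll_sketch(struct fore200e *fore200e,
                                   volatile u32 __iomem *addr, u32 val, int msecs)
{
    while (msecs--) {
        if (fore200e->bus->read(addr) == val)
            return 1;           /* the cp reached the expected state */
        msleep(1);              /* give the i960 time to make progress */
    }
    return 0;                   /* timed out */
}
#endif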
2335 static void fore200e_monitor_putc(struct fore200e *fore200e, char c)
2337 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2342 fore200e->bus->write(((u32) c) | FORE200E_CP_MONITOR_UART_AVAIL, &monitor->soft_uart.send);
2346 static int fore200e_monitor_getc(struct fore200e *fore200e)
2348 struct cp_monitor __iomem * monitor = fore200e->cp_monitor;
2349 unsigned long timeout = jiffies + msecs_to_jiffies(50);
2350 int c;
2352 while (time_before(jiffies, timeout)) {
2354 c = (int) fore200e->bus->read(&monitor->soft_uart.recv);
2356 if (c & FORE200E_CP_MONITOR_UART_AVAIL) {
2358 fore200e->bus->write(FORE200E_CP_MONITOR_UART_FREE, &monitor->soft_uart.recv);
2360 printk("%c", c & 0xFF);
2370 static void fore200e_monitor_puts(struct fore200e *fore200e, char *str)
2371 {
2372 while (*str) {
2374 /* the i960 monitor doesn't accept any new character if it has something to say */
2375 while (fore200e_monitor_getc(fore200e) >= 0);
2377 fore200e_monitor_putc(fore200e, *str++);
2378 }
2380 while (fore200e_monitor_getc(fore200e) >= 0);
2381 }
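/* The soft UART above is a simple mailbox handshake: putc writes the character
   with the AVAIL bit set, and getc acknowledges by writing UART_FREE back once
   the byte has been read, so neither side overruns the other. */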
2383 #ifdef __LITTLE_ENDIAN__
2384 #define FW_EXT ".bin"
2385 #else
2386 #define FW_EXT "_ecd.bin2"
2387 #endif
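/* With the bus proc_name prefixes used in the sprintf() below, the firmware
   image requested at probe time therefore resolves to "pca200e.bin" or
   "pca200e_ecd.bin2" for the PCI card and "sba200e_ecd.bin2" for the SBus
   card, matching the MODULE_FIRMWARE() declarations at the end of this file. */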
2389 static int fore200e_load_and_start_fw(struct fore200e *fore200e)
2391 const struct firmware *firmware;
2392 const struct fw_header *fw_header;
2393 const __le32 *fw_data;
2394 u32 fw_size;
2395 u32 __iomem *load_addr;
2396 char buf[48];
2397 int err;
2399 sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
2400 if ((err = request_firmware(&firmware, buf, fore200e->dev)) < 0) {
2401 printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
2402 return err;
2403 }
2405 fw_data = (const __le32 *)firmware->data;
2406 fw_size = firmware->size / sizeof(u32);
2407 fw_header = (const struct fw_header *)firmware->data;
2408 load_addr = fore200e->virt_base + le32_to_cpu(fw_header->load_offset);
2410 DPRINTK(2, "device %s firmware being loaded at 0x%p (%d words)\n",
2411 fore200e->name, load_addr, fw_size);
2413 if (le32_to_cpu(fw_header->magic) != FW_HEADER_MAGIC) {
2414 printk(FORE200E "corrupted %s firmware image\n", fore200e->bus->model_name);
2415 err = -EINVAL;
2416 goto release;
2417 }
2418 for (; fw_size--; fw_data++, load_addr++)
2419 fore200e->bus->write(le32_to_cpu(*fw_data), load_addr);
2421 DPRINTK(2, "device %s firmware being started\n", fore200e->name);
2423 #if defined(__sparc_v9__)
2424 /* reported to be required by SBA cards on some sparc64 hosts */
2425 fore200e_spin(100);
2426 #endif
2428 sprintf(buf, "\rgo %x\r", le32_to_cpu(fw_header->start_offset));
2429 fore200e_monitor_puts(fore200e, buf);
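/* The "\rgo <hex offset>\r" string is a command for the on-board i960 boot
   monitor: it jumps to the entry point taken from the firmware header, so the
   image copied into adapter RAM above starts executing. */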
2431 if (fore200e_io_poll(fore200e, &fore200e->cp_monitor->bstat, BSTAT_CP_RUNNING, 1000) == 0) {
2432 printk(FORE200E "device %s firmware didn't start\n", fore200e->name);
2433 err = -ENODEV;
2434 goto release;
2435 }
2436 printk(FORE200E "device %s firmware started\n", fore200e->name);
2438 fore200e->state = FORE200E_STATE_START_FW;
2439 err = 0;
2441 release:
2442 release_firmware(firmware);
2443 return err;
2444 }
2447 static int fore200e_register(struct fore200e *fore200e, struct device *parent)
2449 struct atm_dev* atm_dev;
2451 DPRINTK(2, "device %s being registered\n", fore200e->name);
2453 atm_dev = atm_dev_register(fore200e->bus->proc_name, parent, &fore200e_ops,
2454 -1, NULL);
2455 if (atm_dev == NULL) {
2456 printk(FORE200E "unable to register device %s\n", fore200e->name);
2457 return -ENODEV;
2458 }
2460 atm_dev->dev_data = fore200e;
2461 fore200e->atm_dev = atm_dev;
2463 atm_dev->ci_range.vpi_bits = FORE200E_VPI_BITS;
2464 atm_dev->ci_range.vci_bits = FORE200E_VCI_BITS;
2466 fore200e->available_cell_rate = ATM_OC3_PCR;
2468 fore200e->state = FORE200E_STATE_REGISTER;
2470 return 0;
2471 }
2473 static int fore200e_init(struct fore200e *fore200e, struct device *parent)
2475 if (fore200e_register(fore200e, parent) < 0)
2476 return -ENODEV;
2478 if (fore200e->bus->configure(fore200e) < 0)
2479 return -ENODEV;
2481 if (fore200e->bus->map(fore200e) < 0)
2482 return -ENODEV;
2484 if (fore200e_reset(fore200e, 1) < 0)
2485 return -ENODEV;
2487 if (fore200e_load_and_start_fw(fore200e) < 0)
2488 return -ENODEV;
2490 if (fore200e_initialize(fore200e) < 0)
2491 return -ENODEV;
2493 if (fore200e_init_cmd_queue(fore200e) < 0)
2494 return -ENOMEM;
2496 if (fore200e_init_tx_queue(fore200e) < 0)
2497 return -ENOMEM;
2499 if (fore200e_init_rx_queue(fore200e) < 0)
2500 return -ENOMEM;
2502 if (fore200e_init_bs_queue(fore200e) < 0)
2503 return -ENOMEM;
2505 if (fore200e_alloc_rx_buf(fore200e) < 0)
2506 return -ENOMEM;
2508 if (fore200e_get_esi(fore200e) < 0)
2509 return -EIO;
2511 if (fore200e_irq_request(fore200e) < 0)
2512 return -EBUSY;
2514 fore200e_supply(fore200e);
2516 /* all done, board initialization is now complete */
2517 fore200e->state = FORE200E_STATE_COMPLETE;
2518 return 0;
2519 }
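/* Each step above records its progress in fore200e->state; the intent,
   presumably, is that fore200e_shutdown() (used in the probe error paths
   below) can unwind only the stages that actually completed by switching on
   that state. */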
2522 static const struct of_device_id fore200e_sba_match[];
2523 static int fore200e_sba_probe(struct platform_device *op)
2524 {
2525 const struct of_device_id *match;
2526 struct fore200e *fore200e;
2527 static int index = 0;
2528 int err;
2530 match = of_match_device(fore200e_sba_match, &op->dev);
2531 if (!match)
2532 return -EINVAL;
2534 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2535 if (fore200e == NULL)
2536 return -ENOMEM;
2538 fore200e->bus = &fore200e_sbus_ops;
2539 fore200e->dev = &op->dev;
2540 fore200e->irq = op->archdata.irqs[0];
2541 fore200e->phys_base = op->resource[0].start;
2543 sprintf(fore200e->name, "SBA-200E-%d", index);
2545 err = fore200e_init(fore200e, &op->dev);
2546 if (err < 0) {
2547 fore200e_shutdown(fore200e);
2548 kfree(fore200e);
2549 return err;
2550 }
2552 index++;
2553 dev_set_drvdata(&op->dev, fore200e);
2555 return 0;
2556 }
2558 static int fore200e_sba_remove(struct platform_device *op)
2560 struct fore200e *fore200e = dev_get_drvdata(&op->dev);
2562 fore200e_shutdown(fore200e);
2563 kfree(fore200e);
2565 return 0;
2566 }
2568 static const struct of_device_id fore200e_sba_match[] = {
2569 {
2570 .name = SBA200E_PROM_NAME,
2571 },
2572 {},
2573 };
2574 MODULE_DEVICE_TABLE(of, fore200e_sba_match);
2576 static struct platform_driver fore200e_sba_driver = {
2577 .driver = {
2578 .name = "fore_200e",
2579 .of_match_table = fore200e_sba_match,
2580 },
2581 .probe = fore200e_sba_probe,
2582 .remove = fore200e_sba_remove,
2583 };
2587 static int fore200e_pca_detect(struct pci_dev *pci_dev,
2588 const struct pci_device_id *pci_ent)
2589 {
2590 struct fore200e* fore200e;
2591 int err = 0;
2592 static int index = 0;
2594 if (pci_enable_device(pci_dev)) {
2595 err = -EINVAL;
2596 goto out;
2597 }
2599 if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
2600 err = -EINVAL;
2601 goto out;
2602 }
2604 fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
2605 if (fore200e == NULL) {
2606 err = -ENOMEM;
2607 goto out_disable;
2608 }
2610 fore200e->bus = &fore200e_pci_ops;
2611 fore200e->dev = &pci_dev->dev;
2612 fore200e->irq = pci_dev->irq;
2613 fore200e->phys_base = pci_resource_start(pci_dev, 0);
2615 sprintf(fore200e->name, "PCA-200E-%d", index);
2617 pci_set_master(pci_dev);
2619 printk(FORE200E "device PCA-200E found at 0x%lx, IRQ %s\n",
2620 fore200e->phys_base, fore200e_irq_itoa(fore200e->irq));
2624 err = fore200e_init(fore200e, &pci_dev->dev);
2625 if (err < 0) {
2626 fore200e_shutdown(fore200e);
2627 goto out_free;
2628 }
2630 ++index;
2631 pci_set_drvdata(pci_dev, fore200e);
2633 out:
2634 return err;
2636 out_free:
2637 kfree(fore200e);
2638 out_disable:
2639 pci_disable_device(pci_dev);
2640 goto out;
2641 }
2644 static void fore200e_pca_remove_one(struct pci_dev *pci_dev)
2646 struct fore200e *fore200e;
2648 fore200e = pci_get_drvdata(pci_dev);
2650 fore200e_shutdown(fore200e);
2651 kfree(fore200e);
2652 pci_disable_device(pci_dev);
2653 }
2656 static const struct pci_device_id fore200e_pca_tbl[] = {
2657 { PCI_VENDOR_ID_FORE, PCI_DEVICE_ID_FORE_PCA200E, PCI_ANY_ID, PCI_ANY_ID },
2658 { 0, }
2659 };
2661 MODULE_DEVICE_TABLE(pci, fore200e_pca_tbl);
2663 static struct pci_driver fore200e_pca_driver = {
2664 .name = "fore_200e",
2665 .probe = fore200e_pca_detect,
2666 .remove = fore200e_pca_remove_one,
2667 .id_table = fore200e_pca_tbl,
2668 };
2671 static int __init fore200e_module_init(void)
2672 {
2673 int err = 0;
2675 printk(FORE200E "FORE Systems 200E-series ATM driver - version " FORE200E_VERSION "\n");
2678 err = platform_driver_register(&fore200e_sba_driver);
2679 if (err)
2680 return err;
2684 err = pci_register_driver(&fore200e_pca_driver);
2688 if (err)
2689 platform_driver_unregister(&fore200e_sba_driver);
2691 return err;
2692 }
2695 static void __exit fore200e_module_cleanup(void)
2696 {
2698 pci_unregister_driver(&fore200e_pca_driver);
2701 platform_driver_unregister(&fore200e_sba_driver);
2703 }
2705 static int
2706 fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2707 {
2708 struct fore200e* fore200e = FORE200E_DEV(dev);
2709 struct fore200e_vcc* fore200e_vcc;
2710 struct atm_vcc* vcc;
2711 int i, len, left = *pos;
2712 unsigned long flags;
2716 if (fore200e_getstats(fore200e) < 0)
2717 return -EIO;
2719 len = sprintf(page,"\n"
2721 " internal name:\t\t%s\n", fore200e->name);
2723 /* print bus-specific information */
2724 if (fore200e->bus->proc_read)
2725 len += fore200e->bus->proc_read(fore200e, page + len);
2727 len += sprintf(page + len,
2728 " interrupt line:\t\t%s\n"
2729 " physical base address:\t0x%p\n"
2730 " virtual base address:\t0x%p\n"
2731 " factory address (ESI):\t%pM\n"
2732 " board serial number:\t\t%d\n\n",
2733 fore200e_irq_itoa(fore200e->irq),
2734 (void*)fore200e->phys_base,
2735 fore200e->virt_base,
2736 fore200e->esi,
2737 fore200e->esi[4] * 256 + fore200e->esi[5]);
2743 return sprintf(page,
2744 " free small bufs, scheme 1:\t%d\n"
2745 " free large bufs, scheme 1:\t%d\n"
2746 " free small bufs, scheme 2:\t%d\n"
2747 " free large bufs, scheme 2:\t%d\n",
2748 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_SMALL ].freebuf_count,
2749 fore200e->host_bsq[ BUFFER_SCHEME_ONE ][ BUFFER_MAGN_LARGE ].freebuf_count,
2750 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_SMALL ].freebuf_count,
2751 fore200e->host_bsq[ BUFFER_SCHEME_TWO ][ BUFFER_MAGN_LARGE ].freebuf_count);
2754 u32 hb = fore200e->bus->read(&fore200e->cp_queues->heartbeat);
2756 len = sprintf(page,"\n\n"
2757 " cell processor:\n"
2758 " heartbeat state:\t\t");
2760 if (hb >> 16 != 0xDEAD)
2761 len += sprintf(page + len, "0x%08x\n", hb);
2762 else
2763 len += sprintf(page + len, "*** FATAL ERROR %04x ***\n", hb & 0xFFFF);
2769 static const char* media_name[] = {
2770 "unshielded twisted pair",
2771 "multimode optical fiber ST",
2772 "multimode optical fiber SC",
2773 "single-mode optical fiber ST",
2774 "single-mode optical fiber SC",
2775 "unknown"
2776 };
2778 static const char* oc3_mode[] = {
2779 "normal operation",
2780 "diagnostic loopback",
2781 "line loopback",
2782 "unknown"
2783 };
2785 u32 fw_release = fore200e->bus->read(&fore200e->cp_queues->fw_release);
2786 u32 mon960_release = fore200e->bus->read(&fore200e->cp_queues->mon960_release);
2787 u32 oc3_revision = fore200e->bus->read(&fore200e->cp_queues->oc3_revision);
2788 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2789 u32 oc3_index;
2791 if (media_index > 4)
2792 media_index = 5;
2794 switch (fore200e->loop_mode) {
2795 case ATM_LM_NONE: oc3_index = 0;
2796 break;
2797 case ATM_LM_LOC_PHY: oc3_index = 1;
2798 break;
2799 case ATM_LM_RMT_PHY: oc3_index = 2;
2800 break;
2801 default: oc3_index = 3;
2802 }
2804 return sprintf(page,
2805 " firmware release:\t\t%d.%d.%d\n"
2806 " monitor release:\t\t%d.%d\n"
2807 " media type:\t\t\t%s\n"
2808 " OC-3 revision:\t\t0x%x\n"
2809 " OC-3 mode:\t\t\t%s",
2810 fw_release >> 16, fw_release << 16 >> 24, fw_release << 24 >> 24,
2811 mon960_release >> 16, mon960_release << 16 >> 16,
2812 media_name[ media_index ],
2814 oc3_mode[ oc3_index ]);
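/* The shift pairs above just peel bytes out of the packed release words: for a
   hypothetical fw_release of 0x00040107, (x >> 16) gives 4, (x << 16 >> 24)
   gives 1 and (x << 24 >> 24) gives 7, i.e. a "4.1.7" firmware release. */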
2818 struct cp_monitor __iomem * cp_monitor = fore200e->cp_monitor;
2820 return sprintf(page,
2823 " version number:\t\t%d\n"
2824 " boot status word:\t\t0x%08x\n",
2825 fore200e->bus->read(&cp_monitor->mon_version),
2826 fore200e->bus->read(&cp_monitor->bstat));
2830 return sprintf(page,
2832 " device statistics:\n"
2834 " crc_header_errors:\t\t%10u\n"
2835 " framing_errors:\t\t%10u\n",
2836 be32_to_cpu(fore200e->stats->phy.crc_header_errors),
2837 be32_to_cpu(fore200e->stats->phy.framing_errors));
2840 return sprintf(page, "\n"
2842 " section_bip8_errors:\t%10u\n"
2843 " path_bip8_errors:\t\t%10u\n"
2844 " line_bip24_errors:\t\t%10u\n"
2845 " line_febe_errors:\t\t%10u\n"
2846 " path_febe_errors:\t\t%10u\n"
2847 " corr_hcs_errors:\t\t%10u\n"
2848 " ucorr_hcs_errors:\t\t%10u\n",
2849 be32_to_cpu(fore200e->stats->oc3.section_bip8_errors),
2850 be32_to_cpu(fore200e->stats->oc3.path_bip8_errors),
2851 be32_to_cpu(fore200e->stats->oc3.line_bip24_errors),
2852 be32_to_cpu(fore200e->stats->oc3.line_febe_errors),
2853 be32_to_cpu(fore200e->stats->oc3.path_febe_errors),
2854 be32_to_cpu(fore200e->stats->oc3.corr_hcs_errors),
2855 be32_to_cpu(fore200e->stats->oc3.ucorr_hcs_errors));
2858 return sprintf(page,"\n"
2859 " ATM:\t\t\t\t cells\n"
2862 " vpi out of range:\t\t%10u\n"
2863 " vpi no conn:\t\t%10u\n"
2864 " vci out of range:\t\t%10u\n"
2865 " vci no conn:\t\t%10u\n",
2866 be32_to_cpu(fore200e->stats->atm.cells_transmitted),
2867 be32_to_cpu(fore200e->stats->atm.cells_received),
2868 be32_to_cpu(fore200e->stats->atm.vpi_bad_range),
2869 be32_to_cpu(fore200e->stats->atm.vpi_no_conn),
2870 be32_to_cpu(fore200e->stats->atm.vci_bad_range),
2871 be32_to_cpu(fore200e->stats->atm.vci_no_conn));
2874 return sprintf(page,"\n"
2875 " AAL0:\t\t\t cells\n"
2878 " dropped:\t\t\t%10u\n",
2879 be32_to_cpu(fore200e->stats->aal0.cells_transmitted),
2880 be32_to_cpu(fore200e->stats->aal0.cells_received),
2881 be32_to_cpu(fore200e->stats->aal0.cells_dropped));
2884 return sprintf(page,"\n"
2886 " SAR sublayer:\t\t cells\n"
2889 " dropped:\t\t\t%10u\n"
2890 " CRC errors:\t\t%10u\n"
2891 " protocol errors:\t\t%10u\n\n"
2892 " CS sublayer:\t\t PDUs\n"
2895 " dropped:\t\t\t%10u\n"
2896 " protocol errors:\t\t%10u\n",
2897 be32_to_cpu(fore200e->stats->aal34.cells_transmitted),
2898 be32_to_cpu(fore200e->stats->aal34.cells_received),
2899 be32_to_cpu(fore200e->stats->aal34.cells_dropped),
2900 be32_to_cpu(fore200e->stats->aal34.cells_crc_errors),
2901 be32_to_cpu(fore200e->stats->aal34.cells_protocol_errors),
2902 be32_to_cpu(fore200e->stats->aal34.cspdus_transmitted),
2903 be32_to_cpu(fore200e->stats->aal34.cspdus_received),
2904 be32_to_cpu(fore200e->stats->aal34.cspdus_dropped),
2905 be32_to_cpu(fore200e->stats->aal34.cspdus_protocol_errors));
2908 return sprintf(page,"\n"
2910 " SAR sublayer:\t\t cells\n"
2913 " dropped:\t\t\t%10u\n"
2914 " congestions:\t\t%10u\n\n"
2915 " CS sublayer:\t\t PDUs\n"
2918 " dropped:\t\t\t%10u\n"
2919 " CRC errors:\t\t%10u\n"
2920 " protocol errors:\t\t%10u\n",
2921 be32_to_cpu(fore200e->stats->aal5.cells_transmitted),
2922 be32_to_cpu(fore200e->stats->aal5.cells_received),
2923 be32_to_cpu(fore200e->stats->aal5.cells_dropped),
2924 be32_to_cpu(fore200e->stats->aal5.congestion_experienced),
2925 be32_to_cpu(fore200e->stats->aal5.cspdus_transmitted),
2926 be32_to_cpu(fore200e->stats->aal5.cspdus_received),
2927 be32_to_cpu(fore200e->stats->aal5.cspdus_dropped),
2928 be32_to_cpu(fore200e->stats->aal5.cspdus_crc_errors),
2929 be32_to_cpu(fore200e->stats->aal5.cspdus_protocol_errors));
2932 return sprintf(page,"\n"
2933 " AUX:\t\t allocation failures\n"
2934 " small b1:\t\t\t%10u\n"
2935 " large b1:\t\t\t%10u\n"
2936 " small b2:\t\t\t%10u\n"
2937 " large b2:\t\t\t%10u\n"
2938 " RX PDUs:\t\t\t%10u\n"
2939 " TX PDUs:\t\t\t%10lu\n",
2940 be32_to_cpu(fore200e->stats->aux.small_b1_failed),
2941 be32_to_cpu(fore200e->stats->aux.large_b1_failed),
2942 be32_to_cpu(fore200e->stats->aux.small_b2_failed),
2943 be32_to_cpu(fore200e->stats->aux.large_b2_failed),
2944 be32_to_cpu(fore200e->stats->aux.rpd_alloc_failed),
2945 fore200e->tx_sat);
2948 return sprintf(page,"\n"
2949 " receive carrier:\t\t\t%s\n",
2950 fore200e->stats->aux.receive_carrier ? "ON" : "OFF!");
2953 return sprintf(page,"\n"
2954 " VCCs:\n address VPI VCI AAL "
2955 "TX PDUs TX min/max size RX PDUs RX min/max size\n");
2958 for (i = 0; i < NBR_CONNECT; i++) {
2960 vcc = fore200e->vc_map[i].vcc;
2962 if (vcc == NULL)
2963 continue;
2965 spin_lock_irqsave(&fore200e->q_lock, flags);
2967 if (vcc && test_bit(ATM_VF_READY, &vcc->flags) && !left--) {
2969 fore200e_vcc = FORE200E_VCC(vcc);
2970 ASSERT(fore200e_vcc);
2972 len = sprintf(page,
2973 " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
2974 vcc,
2975 vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
2976 fore200e_vcc->tx_pdu,
2977 fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
2978 fore200e_vcc->tx_max_pdu,
2979 fore200e_vcc->rx_pdu,
2980 fore200e_vcc->rx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->rx_min_pdu,
2981 fore200e_vcc->rx_max_pdu);
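/* %pK hashes or censors the vcc pointer according to kptr_restrict, so this
   proc output does not leak raw kernel addresses to unprivileged readers. */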
2983 spin_unlock_irqrestore(&fore200e->q_lock, flags);
2984 return len;
2985 }
2987 spin_unlock_irqrestore(&fore200e->q_lock, flags);
2988 }
2990 return 0;
2991 }
2993 module_init(fore200e_module_init);
2994 module_exit(fore200e_module_cleanup);
2997 static const struct atmdev_ops fore200e_ops = {
2998 .open = fore200e_open,
2999 .close = fore200e_close,
3000 .ioctl = fore200e_ioctl,
3001 .send = fore200e_send,
3002 .change_qos = fore200e_change_qos,
3003 .proc_read = fore200e_proc_read,
3004 .owner = THIS_MODULE
3005 };
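/* Hedged sketch of how these ops are reached: the core ATM layer stores this
   table in the struct atm_dev created by atm_dev_register() above, so a call
   such as

       vcc->dev->ops->send(vcc, skb);

   from net/atm ends up in fore200e_send(); likewise per-device /proc/net/atm
   reads are routed to fore200e_proc_read() one position ("pos") at a time. */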
3007 MODULE_LICENSE("GPL");
3008 #ifdef CONFIG_PCI
3009 #ifdef __LITTLE_ENDIAN__
3010 MODULE_FIRMWARE("pca200e.bin");
3011 #else
3012 MODULE_FIRMWARE("pca200e_ecd.bin2");
3013 #endif
3014 #endif /* CONFIG_PCI */
3015 #ifdef CONFIG_SBUS
3016 MODULE_FIRMWARE("sba200e_ecd.bin2");
3017 #endif /* CONFIG_SBUS */
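/* These MODULE_FIRMWARE() tags let initramfs generators and similar tooling
   pick up the images; at runtime request_firmware() above loads them through
   the usual firmware search path (typically /lib/firmware). */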