2 * Copyright (c) 2010 Broadcom Corporation
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
11 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
13 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
14 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #include <linux/netdevice.h>
20 #include <linux/pci.h>
31 #include <asm/addrspace.h>
35 #error "hnddma.c shouldn't be needed for FULLMAC"
40 #define DMA_ERROR(args) \
42 if (!(*di->msg_level & 1)) \
47 #define DMA_TRACE(args) \
49 if (!(*di->msg_level & 2)) \
55 #define DMA_ERROR(args)
56 #define DMA_TRACE(args)
59 #define DMA_NONE(args)
61 #define d64txregs dregs.d64_u.txregs_64
62 #define d64rxregs dregs.d64_u.rxregs_64
63 #define txd64 dregs.d64_u.txd_64
64 #define rxd64 dregs.d64_u.rxd_64
66 /* default dma message level (if input msg_level pointer is null in dma_attach()) */
67 static uint dma_msg_level;
69 #define MAXNAMEL 8 /* 8 char names */
71 #define DI_INFO(dmah) ((dma_info_t *)dmah)
73 #define R_SM(r) (*(r))
74 #define W_SM(r, v) (*(r) = (v))
76 /* dma engine software state */
77 typedef struct dma_info {
78 struct hnddma_pub hnddma; /* exported structure */
79 uint *msg_level; /* message level pointer */
80 char name[MAXNAMEL]; /* callers name for diag msgs */
82 void *pbus; /* bus handle */
84 bool dma64; /* this dma engine is operating in 64-bit mode */
85 bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
89 dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */
90 dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */
91 dma64dd_t *txd_64; /* pointer to dma64 tx descriptor ring */
92 dma64dd_t *rxd_64; /* pointer to dma64 rx descriptor ring */
96 u16 dmadesc_align; /* alignment requirement for dma descriptors */
98 u16 ntxd; /* # tx descriptors tunable */
99 u16 txin; /* index of next descriptor to reclaim */
100 u16 txout; /* index of next descriptor to post */
101 void **txp; /* pointer to parallel array of pointers to packets */
102 hnddma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */
103 dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */
104 dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */
105 u16 txdalign; /* #bytes added to alloc'd mem to align txd */
106 u32 txdalloc; /* #bytes allocated for the ring */
107 u32 xmtptrbase; /* When using unaligned descriptors, the ptr register
108 * is not just an index, it needs all 13 bits to be
109 * an offset from the addr register.
112 u16 nrxd; /* # rx descriptors tunable */
113 u16 rxin; /* index of next descriptor to reclaim */
114 u16 rxout; /* index of next descriptor to post */
115 void **rxp; /* pointer to parallel array of pointers to packets */
116 hnddma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */
117 dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */
118 dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */
119 u16 rxdalign; /* #bytes added to alloc'd mem to align rxd */
120 u32 rxdalloc; /* #bytes allocated for the ring */
121 u32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */
124 unsigned int rxbufsize; /* rx buffer size in bytes,
125 * not including the extra headroom
127 uint rxextrahdrroom; /* extra rx headroom, reserved to assist upper stack
128 * e.g. some rx pkt buffers will be bridged to tx side
129 * without byte copying. The extra headroom needs to be
130 * large enough to fit txheader needs.
131 * Some dongle drivers may not need it.
133 uint nrxpost; /* # rx buffers to keep posted */
134 unsigned int rxoffset; /* rxcontrol offset */
135 uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
136 uint ddoffsethigh; /* high 32 bits */
137 uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
138 uint dataoffsethigh; /* high 32 bits */
139 bool aligndesc_4k; /* descriptor base needs to be aligned or not */
142 /* DMA Scatter-gather list is supported. Note this is limited to TX direction only */
143 #ifdef BCMDMASGLISTOSL
144 #define DMASGLIST_ENAB true
146 #define DMASGLIST_ENAB false
147 #endif /* BCMDMASGLISTOSL */
149 /* descriptor bumping macros */
150 #define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
151 #define TXD(x) XXD((x), di->ntxd)
152 #define RXD(x) XXD((x), di->nrxd)
153 #define NEXTTXD(i) TXD((i) + 1)
154 #define PREVTXD(i) TXD((i) - 1)
155 #define NEXTRXD(i) RXD((i) + 1)
156 #define PREVRXD(i) RXD((i) - 1)
158 #define NTXDACTIVE(h, t) TXD((t) - (h))
159 #define NRXDACTIVE(h, t) RXD((t) - (h))
161 /* macros to convert between byte offsets and indexes */
162 #define B2I(bytes, type) ((bytes) / sizeof(type))
163 #define I2B(index, type) ((index) * sizeof(type))
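/*
 * Illustrative sketch only (not part of the original driver): because ntxd
 * and nrxd are powers of 2, the mask in XXD() acts as a cheap modulo, so the
 * ring indexes above wrap without a division. The 64-entry ring is assumed.
 */
static inline u16 dma_example_ring_wrap(u16 index)
{
	u16 nd = 64;			/* ring size; must be a power of 2 */

	return XXD(index + 1, nd);	/* e.g. XXD(63 + 1, 64) == 0 */
}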
165 #define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
166 #define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
168 #define PCI64ADDR_HIGH 0x80000000 /* address[63] */
169 #define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */
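/*
 * Worked example (illustrative only): on a 32-bit PCI host the top two
 * address bits cannot be programmed directly and are carried in the AE field
 * instead. A physical address of 0xC1234567 yields
 * ae = (0xC1234567 & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT = 0x3,
 * and the descriptor gets the remaining low address 0x01234567.
 */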
171 /* Common prototypes */
172 static bool _dma_isaddrext(dma_info_t *di);
173 static bool _dma_descriptor_align(dma_info_t *di);
174 static bool _dma_alloc(dma_info_t *di, uint direction);
175 static void _dma_detach(dma_info_t *di);
176 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa);
177 static void _dma_rxinit(dma_info_t *di);
178 static void *_dma_rx(dma_info_t *di);
179 static bool _dma_rxfill(dma_info_t *di);
180 static void _dma_rxreclaim(dma_info_t *di);
181 static void _dma_rxenable(dma_info_t *di);
182 static void *_dma_getnextrxp(dma_info_t *di, bool forceall);
183 static void _dma_rx_param_get(dma_info_t *di, u16 *rxoffset,
186 static void _dma_txblock(dma_info_t *di);
187 static void _dma_txunblock(dma_info_t *di);
188 static uint _dma_txactive(dma_info_t *di);
189 static uint _dma_rxactive(dma_info_t *di);
190 static uint _dma_txpending(dma_info_t *di);
191 static uint _dma_txcommitted(dma_info_t *di);
193 static void *_dma_peeknexttxp(dma_info_t *di);
194 static void *_dma_peeknextrxp(dma_info_t *di);
195 static unsigned long _dma_getvar(dma_info_t *di, const char *name);
196 static void _dma_counterreset(dma_info_t *di);
197 static void _dma_fifoloopbackenable(dma_info_t *di);
198 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags);
199 static u8 dma_align_sizetobits(uint size);
200 static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
201 u16 *alignbits, uint *alloced,
204 /* Prototypes for 64-bit routines */
205 static bool dma64_alloc(dma_info_t *di, uint direction);
206 static bool dma64_txreset(dma_info_t *di);
207 static bool dma64_rxreset(dma_info_t *di);
208 static bool dma64_txsuspendedidle(dma_info_t *di);
209 static int dma64_txfast(dma_info_t *di, struct sk_buff *p0, bool commit);
210 static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit);
211 static void *dma64_getpos(dma_info_t *di, bool direction);
212 static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range);
213 static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
214 static void dma64_txrotate(dma_info_t *di);
216 static bool dma64_rxidle(dma_info_t *di);
217 static void dma64_txinit(dma_info_t *di);
218 static bool dma64_txenabled(dma_info_t *di);
219 static void dma64_txsuspend(dma_info_t *di);
220 static void dma64_txresume(dma_info_t *di);
221 static bool dma64_txsuspended(dma_info_t *di);
222 static void dma64_txreclaim(dma_info_t *di, txd_range_t range);
223 static bool dma64_txstopped(dma_info_t *di);
224 static bool dma64_rxstopped(dma_info_t *di);
225 static bool dma64_rxenabled(dma_info_t *di);
226 static bool _dma64_addrext(dma64regs_t *dma64regs);
228 static inline u32 parity32(u32 data);
230 const di_fcn_t dma64proc = {
231 (di_detach_t) _dma_detach,
232 (di_txinit_t) dma64_txinit,
233 (di_txreset_t) dma64_txreset,
234 (di_txenabled_t) dma64_txenabled,
235 (di_txsuspend_t) dma64_txsuspend,
236 (di_txresume_t) dma64_txresume,
237 (di_txsuspended_t) dma64_txsuspended,
238 (di_txsuspendedidle_t) dma64_txsuspendedidle,
239 (di_txfast_t) dma64_txfast,
240 (di_txunframed_t) dma64_txunframed,
241 (di_getpos_t) dma64_getpos,
242 (di_txstopped_t) dma64_txstopped,
243 (di_txreclaim_t) dma64_txreclaim,
244 (di_getnexttxp_t) dma64_getnexttxp,
245 (di_peeknexttxp_t) _dma_peeknexttxp,
246 (di_txblock_t) _dma_txblock,
247 (di_txunblock_t) _dma_txunblock,
248 (di_txactive_t) _dma_txactive,
249 (di_txrotate_t) dma64_txrotate,
251 (di_rxinit_t) _dma_rxinit,
252 (di_rxreset_t) dma64_rxreset,
253 (di_rxidle_t) dma64_rxidle,
254 (di_rxstopped_t) dma64_rxstopped,
255 (di_rxenable_t) _dma_rxenable,
256 (di_rxenabled_t) dma64_rxenabled,
258 (di_rxfill_t) _dma_rxfill,
259 (di_rxreclaim_t) _dma_rxreclaim,
260 (di_getnextrxp_t) _dma_getnextrxp,
261 (di_peeknextrxp_t) _dma_peeknextrxp,
262 (di_rxparam_get_t) _dma_rx_param_get,
264 (di_fifoloopbackenable_t) _dma_fifoloopbackenable,
265 (di_getvar_t) _dma_getvar,
266 (di_counterreset_t) _dma_counterreset,
267 (di_ctrlflags_t) _dma_ctrlflags,
271 (di_rxactive_t) _dma_rxactive,
272 (di_txpending_t) _dma_txpending,
273 (di_txcommitted_t) _dma_txcommitted,
277 struct hnddma_pub *dma_attach(char *name, si_t *sih,
278 void *dmaregstx, void *dmaregsrx, uint ntxd,
279 uint nrxd, uint rxbufsize, int rxextheadroom,
280 uint nrxpost, uint rxoffset, uint *msg_level)
285 /* allocate private info structure */
286 di = kzalloc(sizeof(dma_info_t), GFP_ATOMIC);
289 printk(KERN_ERR "dma_attach: out of memory\n");
294 di->msg_level = msg_level ? msg_level : &dma_msg_level;
297 di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
299 /* init dma reg pointer */
300 di->d64txregs = (dma64regs_t *) dmaregstx;
301 di->d64rxregs = (dma64regs_t *) dmaregsrx;
302 di->hnddma.di_fn = (const di_fcn_t *)&dma64proc;
304 /* Default flags (which can be changed by the driver calling dma_ctrlflags
305 * before enable): For backwards compatibility both Rx Overflow Continue
306 * and Parity are DISABLED.
309 di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN,
312 DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
313 "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
314 "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
315 di->hnddma.dmactrlflags, ntxd, nrxd, rxbufsize,
316 rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
318 /* make a private copy of our callers name */
319 strncpy(di->name, name, MAXNAMEL);
320 di->name[MAXNAMEL - 1] = '\0';
322 di->pbus = ((struct si_info *)sih)->pbus;
325 di->ntxd = (u16) ntxd;
326 di->nrxd = (u16) nrxd;
328 /* the actual dma size doesn't include the extra headroom */
330 (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom;
331 if (rxbufsize > BCMEXTRAHDROOM)
332 di->rxbufsize = (u16) (rxbufsize - di->rxextrahdrroom);
334 di->rxbufsize = (u16) rxbufsize;
336 di->nrxpost = (u16) nrxpost;
337 di->rxoffset = (u8) rxoffset;
340 * figure out the DMA physical address offset for dd and data
341 * PCI/PCIE: they map silicon backplane address to zero-based memory, need offset
342 * Other bus: use zero
343 * SI_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
346 di->dataoffsetlow = 0;
347 /* for pci bus, add offset */
348 if (sih->bustype == PCI_BUS) {
349 /* pcie with DMA64 */
351 di->ddoffsethigh = SI_PCIE_DMA_H32;
352 di->dataoffsetlow = di->ddoffsetlow;
353 di->dataoffsethigh = di->ddoffsethigh;
355 #if defined(__mips__) && defined(IL_BIGENDIAN)
356 di->dataoffsetlow = di->dataoffsetlow + SI_SDRAM_SWAPPED;
357 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
358 /* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
359 if ((ai_coreid(sih) == SDIOD_CORE_ID)
360 && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
362 else if ((ai_coreid(sih) == I2S_CORE_ID) &&
363 ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
366 di->addrext = _dma_isaddrext(di);
368 /* do the descriptors need to be aligned and, if yes, on 4K/8K boundaries or not */
369 di->aligndesc_4k = _dma_descriptor_align(di);
370 if (di->aligndesc_4k) {
371 di->dmadesc_align = D64RINGALIGN_BITS;
372 if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
373 /* for a smaller dd table, HW relaxes the alignment requirement */
374 di->dmadesc_align = D64RINGALIGN_BITS - 1;
377 di->dmadesc_align = 4; /* 16 byte alignment */
379 DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
380 di->aligndesc_4k, di->dmadesc_align));
382 /* allocate tx packet pointer vector */
384 size = ntxd * sizeof(void *);
385 di->txp = kzalloc(size, GFP_ATOMIC);
386 if (di->txp == NULL) {
387 DMA_ERROR(("%s: dma_attach: out of tx memory\n", di->name));
392 /* allocate rx packet pointer vector */
394 size = nrxd * sizeof(void *);
395 di->rxp = kzalloc(size, GFP_ATOMIC);
396 if (di->rxp == NULL) {
397 DMA_ERROR(("%s: dma_attach: out of rx memory\n", di->name));
402 /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
404 if (!_dma_alloc(di, DMA_TX))
408 /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
410 if (!_dma_alloc(di, DMA_RX))
414 if ((di->ddoffsetlow != 0) && !di->addrext) {
415 if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) {
416 DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->txdpa)));
419 if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) {
420 DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not supported\n", di->name, (u32) PHYSADDRLO(di->rxdpa)));
425 DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh, di->addrext));
427 /* allocate DMA mapping vectors */
428 if (DMASGLIST_ENAB) {
430 size = ntxd * sizeof(hnddma_seg_map_t);
431 di->txp_dmah = kzalloc(size, GFP_ATOMIC);
432 if (di->txp_dmah == NULL)
437 size = nrxd * sizeof(hnddma_seg_map_t);
438 di->rxp_dmah = kzalloc(size, GFP_ATOMIC);
439 if (di->rxp_dmah == NULL)
444 return (struct hnddma_pub *) di;
451 /* Check for odd number of 1's */
452 static inline u32 parity32(u32 data)
463 #define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2)
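/*
 * Illustrative sketch only: one common way to reduce a 32-bit word to its
 * parity bit is to fold it onto itself with xor. This assumed helper shows
 * the idea; it is not necessarily the driver's own parity32() implementation.
 */
static inline u32 parity32_fold_sketch(u32 data)
{
	data ^= data >> 16;
	data ^= data >> 8;
	data ^= data >> 4;
	data ^= data >> 2;
	data ^= data >> 1;

	return data & 1;	/* 1 when the word has an odd number of set bits */
}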
466 dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx,
467 u32 *flags, u32 bufcount)
469 u32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
471 /* PCI bus with big (>1G) physical address, use address extension */
472 #if defined(__mips__) && defined(IL_BIGENDIAN)
473 if ((di->dataoffsetlow == SI_SDRAM_SWAPPED)
474 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
476 if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
477 #endif /* defined(__mips__) && defined(IL_BIGENDIAN) */
479 W_SM(&ddring[outidx].addrlow,
480 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
481 W_SM(&ddring[outidx].addrhigh,
482 BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh));
483 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
484 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
486 /* address extension for 32-bit PCI */
489 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
490 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
492 ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
493 W_SM(&ddring[outidx].addrlow,
494 BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow));
495 W_SM(&ddring[outidx].addrhigh,
496 BUS_SWAP32(0 + di->dataoffsethigh));
497 W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
498 W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
500 if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) {
501 if (DMA64_DD_PARITY(&ddring[outidx])) {
502 W_SM(&ddring[outidx].ctrl2,
503 BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY));
508 static bool _dma_alloc(dma_info_t *di, uint direction)
510 return dma64_alloc(di, direction);
513 void *dma_alloc_consistent(struct pci_dev *pdev, uint size, u16 align_bits,
514 uint *alloced, unsigned long *pap)
517 u16 align = (1 << align_bits);
518 if (!IS_ALIGNED(PAGE_SIZE, align))
522 return pci_alloc_consistent(pdev, size, (dma_addr_t *) pap);
525 /* !! may be called with core in reset */
526 static void _dma_detach(dma_info_t *di)
529 DMA_TRACE(("%s: dma_detach\n", di->name));
531 /* free dma descriptor rings */
533 pci_free_consistent(di->pbus, di->txdalloc,
534 ((s8 *)di->txd64 - di->txdalign),
537 pci_free_consistent(di->pbus, di->rxdalloc,
538 ((s8 *)di->rxd64 - di->rxdalign),
541 /* free packet pointer vectors */
545 /* free tx packet DMA handles */
548 /* free rx packet DMA handles */
551 /* free our private info structure */
556 static bool _dma_descriptor_align(dma_info_t *di)
560 /* Check to see if the descriptors need to be aligned on 4K/8K or not */
561 if (di->d64txregs != NULL) {
562 W_REG(&di->d64txregs->addrlow, 0xff0);
563 addrl = R_REG(&di->d64txregs->addrlow);
566 } else if (di->d64rxregs != NULL) {
567 W_REG(&di->d64rxregs->addrlow, 0xff0);
568 addrl = R_REG(&di->d64rxregs->addrlow);
575 /* return true if this dma engine supports DmaExtendedAddrChanges, otherwise false */
576 static bool _dma_isaddrext(dma_info_t *di)
578 /* DMA64 supports full 32- or 64-bit operation. AE is always valid */
580 /* not all tx or rx channels are available */
581 if (di->d64txregs != NULL) {
582 if (!_dma64_addrext(di->d64txregs)) {
583 DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
584 "AE set\n", di->name));
587 } else if (di->d64rxregs != NULL) {
588 if (!_dma64_addrext(di->d64rxregs)) {
589 DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
590 "AE set\n", di->name));
597 /* initialize descriptor table base address */
598 static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa)
600 if (!di->aligndesc_4k) {
601 if (direction == DMA_TX)
602 di->xmtptrbase = PHYSADDRLO(pa);
604 di->rcvptrbase = PHYSADDRLO(pa);
607 if ((di->ddoffsetlow == 0)
608 || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) {
609 if (direction == DMA_TX) {
610 W_REG(&di->d64txregs->addrlow,
611 (PHYSADDRLO(pa) + di->ddoffsetlow));
612 W_REG(&di->d64txregs->addrhigh,
613 (PHYSADDRHI(pa) + di->ddoffsethigh));
615 W_REG(&di->d64rxregs->addrlow,
616 (PHYSADDRLO(pa) + di->ddoffsetlow));
617 W_REG(&di->d64rxregs->addrhigh,
618 (PHYSADDRHI(pa) + di->ddoffsethigh));
621 /* DMA64 32bits address extension */
624 /* shift the high bit(s) from pa to ae */
625 ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >>
626 PCI32ADDR_HIGH_SHIFT;
627 PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH;
629 if (direction == DMA_TX) {
630 W_REG(&di->d64txregs->addrlow,
631 (PHYSADDRLO(pa) + di->ddoffsetlow));
632 W_REG(&di->d64txregs->addrhigh,
634 SET_REG(&di->d64txregs->control,
635 D64_XC_AE, (ae << D64_XC_AE_SHIFT));
637 W_REG(&di->d64rxregs->addrlow,
638 (PHYSADDRLO(pa) + di->ddoffsetlow));
639 W_REG(&di->d64rxregs->addrhigh,
641 SET_REG(&di->d64rxregs->control,
642 D64_RC_AE, (ae << D64_RC_AE_SHIFT));
647 static void _dma_fifoloopbackenable(dma_info_t *di)
649 DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
651 OR_REG(&di->d64txregs->control, D64_XC_LE);
654 static void _dma_rxinit(dma_info_t *di)
656 DMA_TRACE(("%s: dma_rxinit\n", di->name));
661 di->rxin = di->rxout = 0;
663 /* clear rx descriptor ring */
664 memset((void *)di->rxd64, '\0',
665 (di->nrxd * sizeof(dma64dd_t)));
667 /* DMA engine without alignment requirement requires table to be initialized
668 * before enabling the engine
670 if (!di->aligndesc_4k)
671 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
675 if (di->aligndesc_4k)
676 _dma_ddtable_init(di, DMA_RX, di->rxdpa);
679 static void _dma_rxenable(dma_info_t *di)
681 uint dmactrlflags = di->hnddma.dmactrlflags;
684 DMA_TRACE(("%s: dma_rxenable\n", di->name));
687 (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
690 if ((dmactrlflags & DMA_CTRL_PEN) == 0)
691 control |= D64_RC_PD;
693 if (dmactrlflags & DMA_CTRL_ROC)
694 control |= D64_RC_OC;
696 W_REG(&di->d64rxregs->control,
697 ((di->rxoffset << D64_RC_RO_SHIFT) | control));
701 _dma_rx_param_get(dma_info_t *di, u16 *rxoffset, u16 *rxbufsize)
703 /* the normal values fit into 16 bits */
704 *rxoffset = (u16) di->rxoffset;
705 *rxbufsize = (u16) di->rxbufsize;
708 /* !! rx entry routine
709 * returns a pointer to the next frame received, or NULL if there are no more
710 * if DMA_CTRL_RXMULTI is defined, DMA scattering (multiple buffers) is supported;
712 * otherwise, it's treated as a giant pkt and will be tossed.
713 * The DMA scattering starts with a normal DMA header, followed by the first buffer's data.
714 * After it reaches the max buffer size, the data continues in the next DMA descriptor
715 * buffer WITHOUT a DMA header
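/*
 * Worked example (illustrative, assumed values) of the accounting described
 * above: with rxbufsize == 2048, rxoffset == 30 and a received frame length
 * of 3000 bytes, the first buffer carries min(30 + 3000, 2048) == 2048 bytes
 * and resid == 3000 - (2048 - 30) == 982, so one more descriptor buffer
 * (without a DMA header) completes the frame.
 */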
717 static void *_dma_rx(dma_info_t *di)
719 struct sk_buff *p, *head, *tail;
725 head = _dma_getnextrxp(di, false);
729 len = le16_to_cpu(*(u16 *) (head->data));
730 DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
731 dma_spin_for_len(len, head);
733 /* set actual length */
734 pkt_len = min((di->rxoffset + len), di->rxbufsize);
735 __skb_trim(head, pkt_len);
736 resid = len - (di->rxbufsize - di->rxoffset);
738 /* check for single or multi-buffer rx */
741 while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
743 pkt_len = min(resid, (int)di->rxbufsize);
744 __skb_trim(p, pkt_len);
747 resid -= di->rxbufsize;
754 B2I(((R_REG(&di->d64rxregs->status0) &
756 di->rcvptrbase) & D64_RS0_CD_MASK,
758 DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n",
759 di->rxin, di->rxout, cur));
763 if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
764 DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
766 bcm_pkt_buf_free_skb(head);
767 di->hnddma.rxgiants++;
775 /* post receive buffers
776 * return false if refill failed completely and the ring is empty;
777 * this will stall the rx dma and the user might want to call rxfill again asap.
778 * This is unlikely on a memory-rich NIC, but happens often on a memory-constrained dongle
780 static bool _dma_rxfill(dma_info_t *di)
788 uint extra_offset = 0;
794 * Determine how many receive buffers we're lacking
795 * from the full complement, allocate, initialize,
796 * and post them, then update the chip rx lastdscr.
802 n = di->nrxpost - NRXDACTIVE(rxin, rxout);
804 DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
806 if (di->rxbufsize > BCMEXTRAHDROOM)
807 extra_offset = di->rxextrahdrroom;
809 for (i = 0; i < n; i++) {
810 /* the di->rxbufsize doesn't include the extra headroom; we need to add it to the size to be allocated */
814 p = bcm_pkt_buf_get_skb(di->rxbufsize + extra_offset);
817 DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
819 if (i == 0 && dma64_rxidle(di)) {
820 DMA_ERROR(("%s: rxfill64: ring is empty !\n",
824 di->hnddma.rxnobuf++;
827 /* reserve an extra headroom, if applicable */
829 skb_pull(p, extra_offset);
831 /* Do a cached write instead of uncached write since DMA_MAP
832 * will flush the cache.
834 *(u32 *) (p->data) = 0;
837 memset(&di->rxp_dmah[rxout], 0,
838 sizeof(hnddma_seg_map_t));
840 pa = pci_map_single(di->pbus, p->data,
841 di->rxbufsize, PCI_DMA_FROMDEVICE);
843 /* save the free packet pointer */
846 /* reset flags for each descriptor */
848 if (rxout == (di->nrxd - 1))
849 flags = D64_CTRL1_EOT;
851 dma64_dd_upd(di, di->rxd64, pa, rxout, &flags,
853 rxout = NEXTRXD(rxout);
858 /* update the chip lastdscr pointer */
859 W_REG(&di->d64rxregs->ptr,
860 di->rcvptrbase + I2B(rxout, dma64dd_t));
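/*
 * Worked example (illustrative, assumed values): with nrxpost == 32,
 * rxin == 10 and rxout == 26, NRXDACTIVE(10, 26) == 16 buffers are already
 * posted, so the loop above tries to post n == 32 - 16 == 16 more.
 */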
865 /* like getnexttxp but no reclaim */
866 static void *_dma_peeknexttxp(dma_info_t *di)
874 B2I(((R_REG(&di->d64txregs->status0) &
875 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
878 for (i = di->txin; i != end; i = NEXTTXD(i))
886 /* like getnextrxp but does not take the packet off the ring */
886 static void *_dma_peeknextrxp(dma_info_t *di)
894 B2I(((R_REG(&di->d64rxregs->status0) &
895 D64_RS0_CD_MASK) - di->rcvptrbase) & D64_RS0_CD_MASK,
898 for (i = di->rxin; i != end; i = NEXTRXD(i))
905 static void _dma_rxreclaim(dma_info_t *di)
909 DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
911 while ((p = _dma_getnextrxp(di, true)))
912 bcm_pkt_buf_free_skb(p);
915 static void *_dma_getnextrxp(dma_info_t *di, bool forceall)
920 return dma64_getnextrxp(di, forceall);
923 static void _dma_txblock(dma_info_t *di)
925 di->hnddma.txavail = 0;
928 static void _dma_txunblock(dma_info_t *di)
930 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
933 static uint _dma_txactive(dma_info_t *di)
935 return NTXDACTIVE(di->txin, di->txout);
938 static uint _dma_txpending(dma_info_t *di)
943 B2I(((R_REG(&di->d64txregs->status0) &
944 D64_XS0_CD_MASK) - di->xmtptrbase) & D64_XS0_CD_MASK,
947 return NTXDACTIVE(curr, di->txout);
950 static uint _dma_txcommitted(dma_info_t *di)
953 uint txin = di->txin;
955 if (txin == di->txout)
958 ptr = B2I(R_REG(&di->d64txregs->ptr), dma64dd_t);
960 return NTXDACTIVE(di->txin, ptr);
963 static uint _dma_rxactive(dma_info_t *di)
965 return NRXDACTIVE(di->rxin, di->rxout);
968 static void _dma_counterreset(dma_info_t *di)
970 /* reset all software counters */
971 di->hnddma.rxgiants = 0;
972 di->hnddma.rxnobuf = 0;
973 di->hnddma.txnobuf = 0;
976 static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags)
978 uint dmactrlflags = di->hnddma.dmactrlflags;
981 DMA_ERROR(("%s: _dma_ctrlflags: NULL dma handle\n", di->name));
985 dmactrlflags &= ~mask;
986 dmactrlflags |= flags;
988 /* If trying to enable parity, check if parity is actually supported */
989 if (dmactrlflags & DMA_CTRL_PEN) {
992 control = R_REG(&di->d64txregs->control);
993 W_REG(&di->d64txregs->control,
994 control | D64_XC_PD);
995 if (R_REG(&di->d64txregs->control) & D64_XC_PD) {
996 /* We *can* disable it, so it is supported;
997 * restore the control register
999 W_REG(&di->d64txregs->control,
1002 /* Not supported, don't allow it to be enabled */
1003 dmactrlflags &= ~DMA_CTRL_PEN;
1007 di->hnddma.dmactrlflags = dmactrlflags;
1009 return dmactrlflags;
1012 /* get the address of the variable in order to change it later */
1013 static unsigned long _dma_getvar(dma_info_t *di, const char *name)
1015 if (!strcmp(name, "&txavail"))
1016 return (unsigned long)&(di->hnddma.txavail);
1021 u8 dma_align_sizetobits(uint size)
1024 while (size >>= 1) {
1030 /* This function ensures that the DMA descriptor ring does not get allocated
1031 * across a page boundary. If the first allocation does cross a page boundary,
1032 * it is freed and the allocation is retried at a location aligned to the
1033 * descriptor ring size. This ensures that the ring will
1034 * not cross a page boundary
1036 static void *dma_ringalloc(dma_info_t *di, u32 boundary, uint size,
1037 u16 *alignbits, uint *alloced,
1042 u32 alignbytes = 1 << *alignbits;
1044 va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
1049 desc_strtaddr = (u32) roundup((unsigned long)va, alignbytes);
1050 if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
1052 *alignbits = dma_align_sizetobits(size);
1053 pci_free_consistent(di->pbus, size, va, *descpa);
1054 va = dma_alloc_consistent(di->pbus, size, *alignbits,
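/*
 * Worked example (illustrative, assumed values): with a 4 KiB boundary, a
 * 512-byte ring starting at page offset 0xF00 would end at 0x10FF and
 * straddle the page, so the first allocation is freed and retried with
 * ring-size alignment (dma_align_sizetobits(512) == 9, i.e. 512-byte
 * alignment), which guarantees the ring cannot cross a page boundary.
 */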
1060 /* 64-bit DMA functions */
1062 static void dma64_txinit(dma_info_t *di)
1064 u32 control = D64_XC_XE;
1066 DMA_TRACE(("%s: dma_txinit\n", di->name));
1071 di->txin = di->txout = 0;
1072 di->hnddma.txavail = di->ntxd - 1;
1074 /* clear tx descriptor ring */
1075 memset((void *)di->txd64, '\0', (di->ntxd * sizeof(dma64dd_t)));
1077 /* DMA engine without alignment requirement requires table to be initialized
1078 * before enabling the engine
1080 if (!di->aligndesc_4k)
1081 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1083 if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0)
1084 control |= D64_XC_PD;
1085 OR_REG(&di->d64txregs->control, control);
1087 /* DMA engine with alignment requirement requires table to be initialized
1088 * before enabling the engine
1090 if (di->aligndesc_4k)
1091 _dma_ddtable_init(di, DMA_TX, di->txdpa);
1094 static bool dma64_txenabled(dma_info_t *di)
1098 /* If the chip is dead, it is not enabled :-) */
1099 xc = R_REG(&di->d64txregs->control);
1100 return (xc != 0xffffffff) && (xc & D64_XC_XE);
1103 static void dma64_txsuspend(dma_info_t *di)
1105 DMA_TRACE(("%s: dma_txsuspend\n", di->name));
1110 OR_REG(&di->d64txregs->control, D64_XC_SE);
1113 static void dma64_txresume(dma_info_t *di)
1115 DMA_TRACE(("%s: dma_txresume\n", di->name));
1120 AND_REG(&di->d64txregs->control, ~D64_XC_SE);
1123 static bool dma64_txsuspended(dma_info_t *di)
1125 return (di->ntxd == 0) ||
1126 ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
1130 static void dma64_txreclaim(dma_info_t *di, txd_range_t range)
1134 DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
1135 (range == HNDDMA_RANGE_ALL) ? "all" :
1137 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1140 if (di->txin == di->txout)
1143 while ((p = dma64_getnexttxp(di, range))) {
1144 /* For unframed data, we don't have any packets to free */
1145 if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED))
1146 bcm_pkt_buf_free_skb(p);
1150 static bool dma64_txstopped(dma_info_t *di)
1152 return ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
1153 D64_XS0_XS_STOPPED);
1156 static bool dma64_rxstopped(dma_info_t *di)
1158 return ((R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK) ==
1159 D64_RS0_RS_STOPPED);
1162 static bool dma64_alloc(dma_info_t *di, uint direction)
1171 ddlen = sizeof(dma64dd_t);
1173 size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
1174 align_bits = di->dmadesc_align;
1175 align = (1 << align_bits);
1177 if (direction == DMA_TX) {
1178 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
1179 &alloced, &di->txdpaorig);
1181 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", di->name));
1184 align = (1 << align_bits);
1185 di->txd64 = (dma64dd_t *) roundup((unsigned long)va, align);
1186 di->txdalign = (uint) ((s8 *)di->txd64 - (s8 *) va);
1187 PHYSADDRLOSET(di->txdpa,
1188 PHYSADDRLO(di->txdpaorig) + di->txdalign);
1189 PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig));
1190 di->txdalloc = alloced;
1192 va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
1193 &alloced, &di->rxdpaorig);
1195 DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", di->name));
1198 align = (1 << align_bits);
1199 di->rxd64 = (dma64dd_t *) roundup((unsigned long)va, align);
1200 di->rxdalign = (uint) ((s8 *)di->rxd64 - (s8 *) va);
1201 PHYSADDRLOSET(di->rxdpa,
1202 PHYSADDRLO(di->rxdpaorig) + di->rxdalign);
1203 PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig));
1204 di->rxdalloc = alloced;
1210 static bool dma64_txreset(dma_info_t *di)
1217 /* suspend tx DMA first */
1218 W_REG(&di->d64txregs->control, D64_XC_SE);
1220 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1221 != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
1222 && (status != D64_XS0_XS_STOPPED), 10000);
1224 W_REG(&di->d64txregs->control, 0);
1226 (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
1227 != D64_XS0_XS_DISABLED), 10000);
1229 /* wait for the last transaction to complete */
1232 return status == D64_XS0_XS_DISABLED;
1235 static bool dma64_rxidle(dma_info_t *di)
1237 DMA_TRACE(("%s: dma_rxidle\n", di->name));
1242 return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
1243 (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
1246 static bool dma64_rxreset(dma_info_t *di)
1253 W_REG(&di->d64rxregs->control, 0);
1255 (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
1256 != D64_RS0_RS_DISABLED), 10000);
1258 return status == D64_RS0_RS_DISABLED;
1261 static bool dma64_rxenabled(dma_info_t *di)
1265 rc = R_REG(&di->d64rxregs->control);
1266 return (rc != 0xffffffff) && (rc & D64_RC_RE);
1269 static bool dma64_txsuspendedidle(dma_info_t *di)
1275 if (!(R_REG(&di->d64txregs->control) & D64_XC_SE))
1278 if ((R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK) ==
1285 /* Useful when sending unframed data. This allows us to get a progress report from the DMA.
1286 * We return a pointer to the beginning of the DATA buffer of the current descriptor.
1287 * If DMA is idle, we return NULL.
1289 static void *dma64_getpos(dma_info_t *di, bool direction)
1295 if (direction == DMA_TX) {
1297 R_REG(&di->d64txregs->status0) & D64_XS0_CD_MASK;
1298 idle = !NTXDACTIVE(di->txin, di->txout);
1299 va = di->txp[B2I(cd_offset, dma64dd_t)];
1302 R_REG(&di->d64rxregs->status0) & D64_XS0_CD_MASK;
1303 idle = !NRXDACTIVE(di->rxin, di->rxout);
1304 va = di->rxp[B2I(cd_offset, dma64dd_t)];
1307 /* If DMA is IDLE, return NULL */
1309 DMA_TRACE(("%s: DMA idle, return NULL\n", __func__));
1316 /* TX of unframed data
1318 * Adds a DMA ring descriptor for the data pointed to by "buf".
1319 * This is for DMA of a buffer of data and is unlike other hnddma TX functions
1320 * that take a pointer to a "packet"
1321 * Each call to this results in a single descriptor being added for "len" bytes of
1322 * data starting at "buf"; it doesn't handle chained buffers.
1324 static int dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit)
1328 dmaaddr_t pa; /* phys addr */
1332 /* return nonzero if out of tx descriptors */
1333 if (NEXTTXD(txout) == di->txin)
1339 pa = pci_map_single(di->pbus, buf, len, PCI_DMA_TODEVICE);
1341 flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF);
1343 if (txout == (di->ntxd - 1))
1344 flags |= D64_CTRL1_EOT;
1346 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
1348 /* save the buffer pointer - used by dma_getpos */
1349 di->txp[txout] = buf;
1351 txout = NEXTTXD(txout);
1352 /* bump the tx descriptor index */
1357 W_REG(&di->d64txregs->ptr,
1358 di->xmtptrbase + I2B(txout, dma64dd_t));
1361 /* tx flow control */
1362 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1367 DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __func__));
1368 di->hnddma.txavail = 0;
1369 di->hnddma.txnobuf++;
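/*
 * Worked example (illustrative, assumed values): the "- 1" in the txavail
 * computation above keeps one descriptor unused so that txin == txout always
 * means "ring empty". With ntxd == 256, txin == 0 and txout == 255 the ring
 * is full: NTXDACTIVE(0, 255) == 255 and txavail == 256 - 255 - 1 == 0.
 */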
1373 /* !! tx entry routine
1374 * WARNING: the caller must check the return value for errors;
1375 * an error (tossed frames) could be fatal and cause many subsequent hard-to-debug problems
1377 static int dma64_txfast(dma_info_t *di, struct sk_buff *p0,
1380 struct sk_buff *p, *next;
1381 unsigned char *data;
1387 DMA_TRACE(("%s: dma_txfast\n", di->name));
1392 * Walk the chain of packet buffers
1393 * allocating and initializing transmit descriptor entries.
1395 for (p = p0; p; p = next) {
1397 hnddma_seg_map_t *map;
1403 /* return nonzero if out of tx descriptors */
1404 if (NEXTTXD(txout) == di->txin)
1410 /* get physical address of buffer start */
1412 memset(&di->txp_dmah[txout], 0,
1413 sizeof(hnddma_seg_map_t));
1415 pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
1417 if (DMASGLIST_ENAB) {
1418 map = &di->txp_dmah[txout];
1420 /* See if all the segments can be accounted for */
1422 (uint) (di->ntxd - NTXDACTIVE(di->txin, di->txout) -
1430 for (j = 1; j <= nsegs; j++) {
1432 if (p == p0 && j == 1)
1433 flags |= D64_CTRL1_SOF;
1435 /* With a DMA segment list, the descriptor table is filled
1436 * using the segment list instead of looping over
1437 * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when
1438 * end of segment list is reached.
1440 if ((!DMASGLIST_ENAB && next == NULL) ||
1441 (DMASGLIST_ENAB && j == nsegs))
1442 flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
1443 if (txout == (di->ntxd - 1))
1444 flags |= D64_CTRL1_EOT;
1446 if (DMASGLIST_ENAB) {
1447 len = map->segs[j - 1].length;
1448 pa = map->segs[j - 1].addr;
1450 dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
1452 txout = NEXTTXD(txout);
1455 /* See above. No need to loop over individual buffers */
1460 /* if last txd eof not set, fix it */
1461 if (!(flags & D64_CTRL1_EOF))
1462 W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
1463 BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
1465 /* save the packet */
1466 di->txp[PREVTXD(txout)] = p0;
1468 /* bump the tx descriptor index */
1473 W_REG(&di->d64txregs->ptr,
1474 di->xmtptrbase + I2B(txout, dma64dd_t));
1476 /* tx flow control */
1477 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1482 DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
1483 bcm_pkt_buf_free_skb(p0);
1484 di->hnddma.txavail = 0;
1485 di->hnddma.txnobuf++;
1490 * Reclaim next completed txd (txds if using chained buffers) in the range
1491 * specified and return associated packet.
1492 * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have been
1493 * transmitted as noted by the hardware "CurrDescr" pointer.
1494 * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have been
1495 * transferred by the DMA as noted by the hardware "ActiveDescr" pointer.
1496 * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and
1497 * return associated packet regardless of the value of hardware pointers.
1499 static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range)
1505 DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
1506 (range == HNDDMA_RANGE_ALL) ? "all" :
1508 HNDDMA_RANGE_TRANSMITTED) ? "transmitted" :
1517 if (range == HNDDMA_RANGE_ALL)
1520 dma64regs_t *dregs = di->d64txregs;
1524 (((R_REG(&dregs->status0) &
1526 di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t));
1528 if (range == HNDDMA_RANGE_TRANSFERED) {
1530 (u16) (R_REG(&dregs->status1) &
1533 (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
1534 active_desc = B2I(active_desc, dma64dd_t);
1535 if (end != active_desc)
1536 end = PREVTXD(active_desc);
1540 if ((start == 0) && (end > di->txout))
1543 for (i = start; i != end && !txp; i = NEXTTXD(i)) {
1545 hnddma_seg_map_t *map = NULL;
1546 uint size, j, nsegs;
1549 (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) -
1550 di->dataoffsetlow));
1552 (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) -
1553 di->dataoffsethigh));
1555 if (DMASGLIST_ENAB) {
1556 map = &di->txp_dmah[i];
1557 size = map->origsize;
1561 (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) &
1566 for (j = nsegs; j > 0; j--) {
1567 W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
1568 W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
1576 pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
1581 /* tx flow control */
1582 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1587 DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", start, end, di->txout, forceall));
1591 static void *dma64_getnextrxp(dma_info_t *di, bool forceall)
1599 /* return if no packets posted */
1604 B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
1605 di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t);
1607 /* ignore curr if forceall */
1608 if (!forceall && (i == curr))
1611 /* get the packet pointer that corresponds to the rx descriptor */
1616 (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) -
1617 di->dataoffsetlow));
1619 (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) -
1620 di->dataoffsethigh));
1622 /* clear this packet from the descriptor ring */
1623 pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
1625 W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
1626 W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
1628 di->rxin = NEXTRXD(i);
1633 static bool _dma64_addrext(dma64regs_t *dma64regs)
1636 OR_REG(&dma64regs->control, D64_XC_AE);
1637 w = R_REG(&dma64regs->control);
1638 AND_REG(&dma64regs->control, ~D64_XC_AE);
1639 return (w & D64_XC_AE) == D64_XC_AE;
1643 * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
1645 static void dma64_txrotate(dma_info_t *di)
1654 nactive = _dma_txactive(di);
1656 ((((R_REG(&di->d64txregs->status1) &
1658 - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t));
1659 rot = TXD(ad - di->txin);
1661 /* full-ring case is a lot harder - don't worry about this */
1662 if (rot >= (di->ntxd - nactive)) {
1663 DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
1668 last = PREVTXD(di->txout);
1670 /* move entries starting at last and moving backwards to first */
1671 for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
1672 new = TXD(old + rot);
1675 * Move the tx dma descriptor.
1676 * EOT is set only in the last entry in the ring.
1678 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
1679 if (new == (di->ntxd - 1))
1681 W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
1683 w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
1684 W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
1686 W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
1687 W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
1689 /* zap the old tx dma descriptor address field */
1690 W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
1691 W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
1693 /* move the corresponding txp[] entry */
1694 di->txp[new] = di->txp[old];
1697 if (DMASGLIST_ENAB) {
1698 memcpy(&di->txp_dmah[new], &di->txp_dmah[old],
1699 sizeof(hnddma_seg_map_t));
1700 memset(&di->txp_dmah[old], 0, sizeof(hnddma_seg_map_t));
1703 di->txp[old] = NULL;
1706 /* update txin and txout */
1708 di->txout = TXD(di->txout + rot);
1709 di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
1712 W_REG(&di->d64txregs->ptr,
1713 di->xmtptrbase + I2B(di->txout, dma64dd_t));
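/*
 * Worked example (illustrative, assumed values): with ntxd == 64, txin == 10
 * and the hardware ActiveDescr index ad == 14, rot == TXD(14 - 10) == 4, so
 * the descriptor at old == 20 is copied to new == TXD(20 + 4) == 24 and
 * txin/txout advance by the same 4 positions.
 */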
1716 uint dma_addrwidth(si_t *sih, void *dmaregs)
1718 /* Perform 64-bit checks only if we want to advertise 64-bit (> 32-bit) capability */
1719 /* DMA engine is 64-bit capable */
1720 if ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) {
1721 /* backplane is 64-bit capable */
1722 if (ai_backplane64(sih))
1723 /* If bus is System Backplane or PCIE then we can access 64-bits */
1724 if ((sih->bustype == SI_BUS) ||
1725 ((sih->bustype == PCI_BUS) &&
1726 (sih->buscoretype == PCIE_CORE_ID)))
1727 return DMADDRWIDTH_64;
1729 /* DMA hardware not supported by this driver */
1730 return DMADDRWIDTH_64;
1734 * Mac80211 initiated actions sometimes require packets in the DMA queue to be
1735 * modified. The modified portion of the packet is not under control of the DMA
1736 * engine. This function calls a caller-supplied function for each packet in
1737 * the caller specified dma chain.
1739 void dma_walk_packets(struct hnddma_pub *dmah, void (*callback_fnc)
1740 (void *pkt, void *arg_a), void *arg_a)
1742 dma_info_t *di = (dma_info_t *) dmah;
1744 uint end = di->txout;
1745 struct sk_buff *skb;
1746 struct ieee80211_tx_info *tx_info;
1749 skb = (struct sk_buff *)di->txp[i];
1751 tx_info = (struct ieee80211_tx_info *)skb->cb;
1752 (callback_fnc)(tx_info, arg_a);
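/*
 * Usage sketch (illustrative only; the callback and argument names are
 * hypothetical, not part of the driver): note that dma_walk_packets() hands
 * each packet's ieee80211_tx_info, not the skb itself, to the callback, e.g.
 * dma_walk_packets(dmah, example_walk_cb, my_arg).
 */
static void example_walk_cb(void *pkt, void *arg_a)
{
	struct ieee80211_tx_info *tx_info = pkt;

	/* caller-specific fix-ups of tx_info would go here */
	(void)tx_info;
	(void)arg_a;
}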