Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | ** IA64 System Bus Adapter (SBA) I/O MMU manager | |
3 | ** | |
5f6602a1 | 4 | ** (c) Copyright 2002-2005 Alex Williamson |
1da177e4 | 5 | ** (c) Copyright 2002-2003 Grant Grundler |
5f6602a1 | 6 | ** (c) Copyright 2002-2005 Hewlett-Packard Company |
1da177e4 LT |
7 | ** |
8 | ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code) | |
9 | ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code) | |
10 | ** | |
11 | ** This program is free software; you can redistribute it and/or modify | |
12 | ** it under the terms of the GNU General Public License as published by | |
13 | ** the Free Software Foundation; either version 2 of the License, or | |
14 | ** (at your option) any later version. | |
15 | ** | |
16 | ** | |
17 | ** This module initializes the IOC (I/O Controller) found on HP | |
18 | ** McKinley machines and their successors. | |
19 | ** | |
20 | */ | |
21 | ||
1da177e4 LT |
22 | #include <linux/types.h> |
23 | #include <linux/kernel.h> | |
24 | #include <linux/module.h> | |
25 | #include <linux/spinlock.h> | |
26 | #include <linux/slab.h> | |
27 | #include <linux/init.h> | |
28 | #include <linux/mm.h> | |
29 | #include <linux/string.h> | |
30 | #include <linux/pci.h> | |
31 | #include <linux/proc_fs.h> | |
32 | #include <linux/seq_file.h> | |
33 | #include <linux/acpi.h> | |
34 | #include <linux/efi.h> | |
35 | #include <linux/nodemask.h> | |
36 | #include <linux/bitops.h> /* hweight64() */ | |
51b58e3e | 37 | #include <linux/crash_dump.h> |
1da177e4 LT |
38 | |
39 | #include <asm/delay.h> /* ia64_get_itc() */ | |
40 | #include <asm/io.h> | |
41 | #include <asm/page.h> /* PAGE_OFFSET */ | |
42 | #include <asm/dma.h> | |
43 | #include <asm/system.h> /* wmb() */ | |
44 | ||
45 | #include <asm/acpi-ext.h> | |
46 | ||
51b58e3e TL |
47 | extern int swiotlb_late_init_with_default_size (size_t size); |
48 | ||
1da177e4 LT |
49 | #define PFX "IOC: " |
50 | ||
51 | /* | |
52 | ** Enables timing of pdir resource map searches. Output in /proc. | |
53 | ** Disabled by default to optimize performance. | |
54 | */ | |
55 | #undef PDIR_SEARCH_TIMING | |
56 | ||
57 | /* | |
58 | ** This option allows cards capable of 64bit DMA to bypass the IOMMU. If | |
59 | ** not defined, all DMA will be 32bit and go through the TLB. | |
60 | ** There's potentially a conflict in the bio merge code with us | |
61 | ** advertising an iommu, but then bypassing it. Since I/O MMU bypassing | |
62 | ** appears to give more performance than bio-level virtual merging, we'll | |
63 | ** do the former for now. NOTE: ALLOW_IOV_BYPASS_SG also needs to be undef'd to | |
64 | ** completely restrict DMA to the IOMMU. | |
65 | */ | |
66 | #define ALLOW_IOV_BYPASS | |
67 | ||
68 | /* | |
69 | ** This option specifically allows/disallows bypassing scatterlists with | |
70 | ** multiple entries. Coalescing these entries can allow better DMA streaming | |
71 | ** and in some cases shows better performance than entirely bypassing the | |
72 | ** IOMMU. Performance increase on the order of 1-2% sequential output/input | |
73 | ** using bonnie++ on a RAID0 MD device (sym2 & mpt). | |
74 | */ | |
75 | #undef ALLOW_IOV_BYPASS_SG | |
76 | ||
77 | /* | |
78 | ** If a device prefetches beyond the end of a valid pdir entry, it will cause | |
79 | ** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should | |
80 | ** disconnect on 4k boundaries and prevent such issues. If the device is | |
0779bf2d | 81 | ** particularly aggressive, this option will keep the entire pdir valid such |
1da177e4 LT |
82 | ** that prefetching will hit a valid address. This could severely impact |
83 | ** error containment, and is therefore off by default. The page that is | |
84 | ** used for spill-over is poisoned, so that should help debugging somewhat. | |
85 | */ | |
86 | #undef FULL_VALID_PDIR | |
87 | ||
88 | #define ENABLE_MARK_CLEAN | |
89 | ||
90 | /* | |
91 | ** The number of debug flags is a clue - this code is fragile. NOTE: since | |
92 | ** tightening the use of res_lock the resource bitmap and actual pdir are no | |
93 | ** longer guaranteed to stay in sync. The sanity checking code isn't going to | |
94 | ** like that. | |
95 | */ | |
96 | #undef DEBUG_SBA_INIT | |
97 | #undef DEBUG_SBA_RUN | |
98 | #undef DEBUG_SBA_RUN_SG | |
99 | #undef DEBUG_SBA_RESOURCE | |
100 | #undef ASSERT_PDIR_SANITY | |
101 | #undef DEBUG_LARGE_SG_ENTRIES | |
102 | #undef DEBUG_BYPASS | |
103 | ||
104 | #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY) | |
105 | #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive | |
106 | #endif | |
107 | ||
108 | #define SBA_INLINE __inline__ | |
109 | /* #define SBA_INLINE */ | |
110 | ||
111 | #ifdef DEBUG_SBA_INIT | |
112 | #define DBG_INIT(x...) printk(x) | |
113 | #else | |
114 | #define DBG_INIT(x...) | |
115 | #endif | |
116 | ||
117 | #ifdef DEBUG_SBA_RUN | |
118 | #define DBG_RUN(x...) printk(x) | |
119 | #else | |
120 | #define DBG_RUN(x...) | |
121 | #endif | |
122 | ||
123 | #ifdef DEBUG_SBA_RUN_SG | |
124 | #define DBG_RUN_SG(x...) printk(x) | |
125 | #else | |
126 | #define DBG_RUN_SG(x...) | |
127 | #endif | |
128 | ||
129 | ||
130 | #ifdef DEBUG_SBA_RESOURCE | |
131 | #define DBG_RES(x...) printk(x) | |
132 | #else | |
133 | #define DBG_RES(x...) | |
134 | #endif | |
135 | ||
136 | #ifdef DEBUG_BYPASS | |
137 | #define DBG_BYPASS(x...) printk(x) | |
138 | #else | |
139 | #define DBG_BYPASS(x...) | |
140 | #endif | |
141 | ||
142 | #ifdef ASSERT_PDIR_SANITY | |
143 | #define ASSERT(expr) \ | |
144 | if(!(expr)) { \ | |
145 | printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \ | |
146 | panic(#expr); \ | |
147 | } | |
148 | #else | |
149 | #define ASSERT(expr) | |
150 | #endif | |
151 | ||
152 | /* | |
153 | ** The number of pdir entries to "free" before issuing | |
154 | ** a read to PCOM register to flush out PCOM writes. | |
155 | ** Interacts with allocation granularity (ie 4 or 8 entries | |
156 | ** allocated and free'd/purged at a time might make this | |
157 | ** less interesting). | |
158 | */ | |
159 | #define DELAYED_RESOURCE_CNT 64 | |
160 | ||
e15da401 BH |
161 | #define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec |
162 | ||
1da177e4 LT |
163 | #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP) |
164 | #define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP) | |
165 | #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP) | |
166 | #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP) | |
e15da401 | 167 | #define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP) |
1da177e4 LT |
168 | |
169 | #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */ | |
170 | ||
171 | #define IOC_FUNC_ID 0x000 | |
172 | #define IOC_FCLASS 0x008 /* function class, bist, header, rev... */ | |
173 | #define IOC_IBASE 0x300 /* IO TLB */ | |
174 | #define IOC_IMASK 0x308 | |
175 | #define IOC_PCOM 0x310 | |
176 | #define IOC_TCNFG 0x318 | |
177 | #define IOC_PDIR_BASE 0x320 | |
178 | ||
179 | #define IOC_ROPE0_CFG 0x500 | |
180 | #define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */ | |
181 | ||
182 | ||
183 | /* AGP GART driver looks for this */ | |
184 | #define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL | |
185 | ||
186 | /* | |
187 | ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register) | |
188 | ** | |
189 | ** Some IOCs (sx1000) can run at the above page sizes, but are | |
190 | ** really only supported using the IOC at a 4k page size. | |
191 | ** | |
192 | ** iovp_size could only be greater than PAGE_SIZE if we are | |
193 | ** confident the drivers really only touch the next physical | |
194 | ** page iff that driver instance owns it. | |
195 | */ | |
196 | static unsigned long iovp_size; | |
197 | static unsigned long iovp_shift; | |
198 | static unsigned long iovp_mask; | |
199 | ||
200 | struct ioc { | |
201 | void __iomem *ioc_hpa; /* I/O MMU base address */ | |
202 | char *res_map; /* resource map, bit == pdir entry */ | |
203 | u64 *pdir_base; /* physical base address */ | |
204 | unsigned long ibase; /* pdir IOV Space base */ | |
205 | unsigned long imask; /* pdir IOV Space mask */ | |
206 | ||
207 | unsigned long *res_hint; /* next avail IOVP - circular search */ | |
208 | unsigned long dma_mask; | |
209 | spinlock_t res_lock; /* protects the resource bitmap, but must be held when */ | |
210 | /* clearing pdir to prevent races with allocations. */ | |
211 | unsigned int res_bitshift; /* from the RIGHT! */ | |
212 | unsigned int res_size; /* size of resource map in bytes */ | |
213 | #ifdef CONFIG_NUMA | |
214 | unsigned int node; /* node where this IOC lives */ | |
215 | #endif | |
216 | #if DELAYED_RESOURCE_CNT > 0 | |
217 | spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */ | |
218 | /* than res_lock for bigger systems. */ | |
219 | int saved_cnt; | |
220 | struct sba_dma_pair { | |
221 | dma_addr_t iova; | |
222 | size_t size; | |
223 | } saved[DELAYED_RESOURCE_CNT]; | |
224 | #endif | |
225 | ||
226 | #ifdef PDIR_SEARCH_TIMING | |
227 | #define SBA_SEARCH_SAMPLE 0x100 | |
228 | unsigned long avg_search[SBA_SEARCH_SAMPLE]; | |
229 | unsigned long avg_idx; /* current index into avg_search */ | |
230 | #endif | |
231 | ||
232 | /* Stuff we don't need in performance path */ | |
233 | struct ioc *next; /* list of IOC's in system */ | |
234 | acpi_handle handle; /* for multiple IOC's */ | |
235 | const char *name; | |
236 | unsigned int func_id; | |
237 | unsigned int rev; /* HW revision of chip */ | |
238 | u32 iov_size; | |
239 | unsigned int pdir_size; /* in bytes, determined by IOV Space size */ | |
240 | struct pci_dev *sac_only_dev; | |
241 | }; | |
242 | ||
243 | static struct ioc *ioc_list; | |
244 | static int reserve_sba_gart = 1; | |
245 | ||
246 | static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t); | |
247 | static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t); | |
248 | ||
249 | #define sba_sg_address(sg) (page_address((sg)->page) + (sg)->offset) | |
250 | ||
251 | #ifdef FULL_VALID_PDIR | |
252 | static u64 prefetch_spill_page; | |
253 | #endif | |
254 | ||
255 | #ifdef CONFIG_PCI | |
256 | # define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \ | |
257 | ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL) | |
258 | #else | |
259 | # define GET_IOC(dev) NULL | |
260 | #endif | |
261 | ||
262 | /* | |
263 | ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up | |
0779bf2d | 264 | ** (or rather not merge) DMAs into manageable chunks. |
1da177e4 | 265 | ** On parisc, this is more of a software/tuning constraint |
0779bf2d ML |
266 | ** than a HW one. I/O MMU allocation algorithms can be |
267 | ** faster with smaller sizes (to some degree). | |
1da177e4 LT |
268 | */ |
269 | #define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size) | |
270 | ||
271 | #define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1)) | |
272 | ||
273 | /************************************ | |
274 | ** SBA register read and write support | |
275 | ** | |
276 | ** BE WARNED: register writes are posted. | |
277 | ** (ie follow writes which must reach HW with a read) | |
278 | ** | |
279 | */ | |
280 | #define READ_REG(addr) __raw_readq(addr) | |
281 | #define WRITE_REG(val, addr) __raw_writeq(val, addr) | |
282 | ||
283 | #ifdef DEBUG_SBA_INIT | |
284 | ||
285 | /** | |
286 | * sba_dump_tlb - debugging only - print IOMMU operating parameters | |
287 | * @hpa: base address of the IOMMU | |
288 | * | |
289 | * Print the size/location of the IO MMU PDIR. | |
290 | */ | |
291 | static void | |
292 | sba_dump_tlb(char *hpa) | |
293 | { | |
294 | DBG_INIT("IO TLB at 0x%p\n", (void *)hpa); | |
295 | DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE)); | |
296 | DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK)); | |
297 | DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG)); | |
298 | DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE)); | |
299 | DBG_INIT("\n"); | |
300 | } | |
301 | #endif | |
302 | ||
303 | ||
304 | #ifdef ASSERT_PDIR_SANITY | |
305 | ||
306 | /** | |
307 | * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry | |
308 | * @ioc: IO MMU structure which owns the pdir we are interested in. | |
309 | * @msg: text to print on the output line. | |
310 | * @pide: pdir index. | |
311 | * | |
312 | * Print one entry of the IO MMU PDIR in human readable form. | |
313 | */ | |
314 | static void | |
315 | sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide) | |
316 | { | |
317 | /* start printing from lowest pde in rval */ | |
318 | u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)]; | |
319 | unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)]; | |
320 | uint rcnt; | |
321 | ||
322 | printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n", | |
323 | msg, rptr, pide & (BITS_PER_LONG - 1), *rptr); | |
324 | ||
325 | rcnt = 0; | |
326 | while (rcnt < BITS_PER_LONG) { | |
327 | printk(KERN_DEBUG "%s %2d %p %016Lx\n", | |
328 | (rcnt == (pide & (BITS_PER_LONG - 1))) | |
329 | ? " -->" : " ", | |
330 | rcnt, ptr, (unsigned long long) *ptr ); | |
331 | rcnt++; | |
332 | ptr++; | |
333 | } | |
334 | printk(KERN_DEBUG "%s", msg); | |
335 | } | |
336 | ||
337 | ||
338 | /** | |
339 | * sba_check_pdir - debugging only - consistency checker | |
340 | * @ioc: IO MMU structure which owns the pdir we are interested in. | |
341 | * @msg: text to print on the output line. | |
342 | * | |
343 | * Verify that the resource map and pdir state are consistent | |
344 | */ | |
345 | static int | |
346 | sba_check_pdir(struct ioc *ioc, char *msg) | |
347 | { | |
348 | u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]); | |
349 | u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */ | |
350 | u64 *pptr = ioc->pdir_base; /* pdir ptr */ | |
351 | uint pide = 0; | |
352 | ||
353 | while (rptr < rptr_end) { | |
354 | u64 rval; | |
355 | int rcnt; /* number of bits we might check */ | |
356 | ||
357 | rval = *rptr; | |
358 | rcnt = 64; | |
359 | ||
360 | while (rcnt) { | |
361 | /* Get last byte and highest bit from that */ | |
362 | u32 pde = ((u32)((*pptr >> (63)) & 0x1)); | |
363 | if ((rval & 0x1) ^ pde) | |
364 | { | |
365 | /* | |
366 | ** BUMMER! -- res_map != pdir -- | |
367 | ** Dump rval and matching pdir entries | |
368 | */ | |
369 | sba_dump_pdir_entry(ioc, msg, pide); | |
370 | return(1); | |
371 | } | |
372 | rcnt--; | |
373 | rval >>= 1; /* try the next bit */ | |
374 | pptr++; | |
375 | pide++; | |
376 | } | |
377 | rptr++; /* look at next word of res_map */ | |
378 | } | |
379 | /* It'd be nice if we always got here :^) */ | |
380 | return 0; | |
381 | } | |
382 | ||
383 | ||
384 | /** | |
385 | * sba_dump_sg - debugging only - print Scatter-Gather list | |
386 | * @ioc: IO MMU structure which owns the pdir we are interested in. | |
387 | * @startsg: head of the SG list | |
388 | * @nents: number of entries in SG list | |
389 | * | |
390 | * print the SG list so we can verify it's correct by hand. | |
391 | */ | |
392 | static void | |
393 | sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |
394 | { | |
395 | while (nents-- > 0) { | |
396 | printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents, | |
397 | startsg->dma_address, startsg->dma_length, | |
398 | sba_sg_address(startsg)); | |
9b6eccfc | 399 | startsg = sg_next(startsg); |
1da177e4 LT |
400 | } |
401 | } | |
402 | ||
403 | static void | |
404 | sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents) | |
405 | { | |
406 | struct scatterlist *the_sg = startsg; | |
407 | int the_nents = nents; | |
408 | ||
409 | while (the_nents-- > 0) { | |
410 | if (sba_sg_address(the_sg) == 0x0UL) | |
411 | sba_dump_sg(NULL, startsg, nents); | |
9b6eccfc | 412 | the_sg = sg_next(the_sg); |
1da177e4 LT |
413 | } |
414 | } | |
415 | ||
416 | #endif /* ASSERT_PDIR_SANITY */ | |
417 | ||
418 | ||
419 | ||
420 | ||
421 | /************************************************************** | |
422 | * | |
423 | * I/O Pdir Resource Management | |
424 | * | |
425 | * Bits set in the resource map are in use. | |
426 | * Each bit can represent a number of pages. | |
427 | * LSbs represent lower addresses (IOVA's). | |
428 | * | |
429 | ***************************************************************/ | |
430 | #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */ | |
431 | ||
432 | /* Convert from IOVP to IOVA and vice versa. */ | |
433 | #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset)) | |
434 | #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase)) | |
435 | ||
436 | #define PDIR_ENTRY_SIZE sizeof(u64) | |
437 | ||
438 | #define PDIR_INDEX(iovp) ((iovp)>>iovp_shift) | |
439 | ||
440 | #define RESMAP_MASK(n) ~(~0UL << (n)) | |
441 | #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1) | |
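| /* | |
| ** Note (added): RESMAP_MASK(n) yields the n low bits set (e.g. RESMAP_MASK(4) == 0xf). | |
| ** Masking a res_map byte index with ~RESMAP_IDX_MASK aligns it down to an | |
| ** unsigned long boundary before it is cast to an unsigned long pointer. | |
| */ | |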
442 | ||
443 | ||
444 | /** | |
445 | * For most cases the normal get_order is sufficient; however, it limits us | |
446 | * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity. | |
447 | * It only incurs about 1 clock cycle to use this one with the static variable | |
448 | * and makes the code more intuitive. | |
449 | */ | |
450 | static SBA_INLINE int | |
451 | get_iovp_order (unsigned long size) | |
452 | { | |
453 | long double d = size - 1; | |
454 | long order; | |
455 | ||
456 | order = ia64_getf_exp(d); | |
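| /* | |
| ** Note (added): ia64_getf_exp() returns the biased exponent of d (register-format | |
| ** bias 0xffff); stripping the bias and iovp_shift below yields the smallest | |
| ** order such that (1 << order) IOVP pages cover size. | |
| */ | |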
457 | order = order - iovp_shift - 0xffff + 1; | |
458 | if (order < 0) | |
459 | order = 0; | |
460 | return order; | |
461 | } | |
462 | ||
463 | /** | |
464 | * sba_search_bitmap - find free space in IO PDIR resource bitmap | |
465 | * @ioc: IO MMU structure which owns the pdir we are interested in. | |
466 | * @bits_wanted: number of entries we need. | |
5f6602a1 | 467 | * @use_hint: use res_hint to indicate where to start looking |
1da177e4 LT |
468 | * |
469 | * Find consecutive free bits in resource bitmap. | |
470 | * Each bit represents one entry in the IO Pdir. | |
471 | * Cool perf optimization: search for log2(size) bits at a time. | |
472 | */ | |
473 | static SBA_INLINE unsigned long | |
5f6602a1 | 474 | sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint) |
1da177e4 | 475 | { |
5f6602a1 | 476 | unsigned long *res_ptr; |
1da177e4 | 477 | unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]); |
5f6602a1 | 478 | unsigned long flags, pide = ~0UL; |
1da177e4 LT |
479 | |
480 | ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0); | |
481 | ASSERT(res_ptr < res_end); | |
482 | ||
5f6602a1 AW |
483 | spin_lock_irqsave(&ioc->res_lock, flags); |
484 | ||
485 | /* Allow caller to force a search through the entire resource space */ | |
486 | if (likely(use_hint)) { | |
487 | res_ptr = ioc->res_hint; | |
488 | } else { | |
489 | res_ptr = (ulong *)ioc->res_map; | |
490 | ioc->res_bitshift = 0; | |
491 | } | |
492 | ||
1da177e4 LT |
493 | /* |
494 | * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts | |
495 | * if a TLB entry is purged while in use. sba_mark_invalid() | |
496 | * purges IOTLB entries in power-of-two sizes, so we also | |
497 | * allocate IOVA space in power-of-two sizes. | |
498 | */ | |
499 | bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift); | |
500 | ||
501 | if (likely(bits_wanted == 1)) { | |
502 | unsigned int bitshiftcnt; | |
503 | for(; res_ptr < res_end ; res_ptr++) { | |
504 | if (likely(*res_ptr != ~0UL)) { | |
505 | bitshiftcnt = ffz(*res_ptr); | |
506 | *res_ptr |= (1UL << bitshiftcnt); | |
507 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | |
508 | pide <<= 3; /* convert to bit address */ | |
509 | pide += bitshiftcnt; | |
510 | ioc->res_bitshift = bitshiftcnt + bits_wanted; | |
511 | goto found_it; | |
512 | } | |
513 | } | |
514 | goto not_found; | |
515 | ||
516 | } | |
517 | ||
518 | if (likely(bits_wanted <= BITS_PER_LONG/2)) { | |
519 | /* | |
520 | ** Search the resource bit map on well-aligned values. | |
521 | ** "o" is the alignment. | |
522 | ** We need the alignment to invalidate I/O TLB using | |
523 | ** SBA HW features in the unmap path. | |
524 | */ | |
525 | unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift); | |
526 | uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o); | |
527 | unsigned long mask, base_mask; | |
528 | ||
529 | base_mask = RESMAP_MASK(bits_wanted); | |
530 | mask = base_mask << bitshiftcnt; | |
531 | ||
532 | DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr); | |
533 | for(; res_ptr < res_end ; res_ptr++) | |
534 | { | |
535 | DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr); | |
536 | ASSERT(0 != mask); | |
537 | for (; mask ; mask <<= o, bitshiftcnt += o) { | |
538 | if(0 == ((*res_ptr) & mask)) { | |
539 | *res_ptr |= mask; /* mark resources busy! */ | |
540 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | |
541 | pide <<= 3; /* convert to bit address */ | |
542 | pide += bitshiftcnt; | |
543 | ioc->res_bitshift = bitshiftcnt + bits_wanted; | |
544 | goto found_it; | |
545 | } | |
546 | } | |
547 | ||
548 | bitshiftcnt = 0; | |
549 | mask = base_mask; | |
550 | ||
551 | } | |
552 | ||
553 | } else { | |
554 | int qwords, bits, i; | |
555 | unsigned long *end; | |
556 | ||
557 | qwords = bits_wanted >> 6; /* /64 */ | |
558 | bits = bits_wanted - (qwords * BITS_PER_LONG); | |
559 | ||
560 | end = res_end - qwords; | |
561 | ||
562 | for (; res_ptr < end; res_ptr++) { | |
563 | for (i = 0 ; i < qwords ; i++) { | |
564 | if (res_ptr[i] != 0) | |
565 | goto next_ptr; | |
566 | } | |
567 | if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits)) | |
568 | continue; | |
569 | ||
570 | /* Found it, mark it */ | |
571 | for (i = 0 ; i < qwords ; i++) | |
572 | res_ptr[i] = ~0UL; | |
573 | res_ptr[i] |= RESMAP_MASK(bits); | |
574 | ||
575 | pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map); | |
576 | pide <<= 3; /* convert to bit address */ | |
577 | res_ptr += qwords; | |
578 | ioc->res_bitshift = bits; | |
579 | goto found_it; | |
580 | next_ptr: | |
581 | ; | |
582 | } | |
583 | } | |
584 | ||
585 | not_found: | |
586 | prefetch(ioc->res_map); | |
587 | ioc->res_hint = (unsigned long *) ioc->res_map; | |
588 | ioc->res_bitshift = 0; | |
5f6602a1 | 589 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
1da177e4 LT |
590 | return (pide); |
591 | ||
592 | found_it: | |
593 | ioc->res_hint = res_ptr; | |
5f6602a1 | 594 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
1da177e4 LT |
595 | return (pide); |
596 | } | |
597 | ||
598 | ||
599 | /** | |
600 | * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap | |
601 | * @ioc: IO MMU structure which owns the pdir we are interested in. | |
602 | * @size: number of bytes to create a mapping for | |
603 | * | |
604 | * Given a size, find consecutive unmarked and then mark those bits in the | |
605 | * resource bit map. | |
606 | */ | |
607 | static int | |
608 | sba_alloc_range(struct ioc *ioc, size_t size) | |
609 | { | |
610 | unsigned int pages_needed = size >> iovp_shift; | |
611 | #ifdef PDIR_SEARCH_TIMING | |
612 | unsigned long itc_start; | |
613 | #endif | |
614 | unsigned long pide; | |
1da177e4 LT |
615 | |
616 | ASSERT(pages_needed); | |
617 | ASSERT(0 == (size & ~iovp_mask)); | |
618 | ||
1da177e4 LT |
619 | #ifdef PDIR_SEARCH_TIMING |
620 | itc_start = ia64_get_itc(); | |
621 | #endif | |
622 | /* | |
623 | ** "seek and ye shall find"...praying never hurts either... | |
624 | */ | |
5f6602a1 | 625 | pide = sba_search_bitmap(ioc, pages_needed, 1); |
1da177e4 | 626 | if (unlikely(pide >= (ioc->res_size << 3))) { |
5f6602a1 | 627 | pide = sba_search_bitmap(ioc, pages_needed, 0); |
1da177e4 LT |
628 | if (unlikely(pide >= (ioc->res_size << 3))) { |
629 | #if DELAYED_RESOURCE_CNT > 0 | |
5f6602a1 AW |
630 | unsigned long flags; |
631 | ||
1da177e4 LT |
632 | /* |
633 | ** With delayed resource freeing, we can give this one more shot. We're | |
634 | ** getting close to being in trouble here, so do what we can to make this | |
635 | ** one count. | |
636 | */ | |
5f6602a1 | 637 | spin_lock_irqsave(&ioc->saved_lock, flags); |
1da177e4 LT |
638 | if (ioc->saved_cnt > 0) { |
639 | struct sba_dma_pair *d; | |
640 | int cnt = ioc->saved_cnt; | |
641 | ||
5f6602a1 | 642 | d = &(ioc->saved[ioc->saved_cnt - 1]); |
1da177e4 | 643 | |
5f6602a1 | 644 | spin_lock(&ioc->res_lock); |
1da177e4 LT |
645 | while (cnt--) { |
646 | sba_mark_invalid(ioc, d->iova, d->size); | |
647 | sba_free_range(ioc, d->iova, d->size); | |
648 | d--; | |
649 | } | |
650 | ioc->saved_cnt = 0; | |
651 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | |
5f6602a1 | 652 | spin_unlock(&ioc->res_lock); |
1da177e4 | 653 | } |
5f6602a1 | 654 | spin_unlock_irqrestore(&ioc->saved_lock, flags); |
1da177e4 | 655 | |
5f6602a1 | 656 | pide = sba_search_bitmap(ioc, pages_needed, 0); |
1da177e4 LT |
657 | if (unlikely(pide >= (ioc->res_size << 3))) |
658 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", | |
659 | ioc->ioc_hpa); | |
660 | #else | |
661 | panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n", | |
662 | ioc->ioc_hpa); | |
663 | #endif | |
664 | } | |
665 | } | |
666 | ||
667 | #ifdef PDIR_SEARCH_TIMING | |
668 | ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed; | |
669 | ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1; | |
670 | #endif | |
671 | ||
672 | prefetchw(&(ioc->pdir_base[pide])); | |
673 | ||
674 | #ifdef ASSERT_PDIR_SANITY | |
675 | /* verify the first enable bit is clear */ | |
676 | if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) { | |
677 | sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide); | |
678 | } | |
679 | #endif | |
680 | ||
681 | DBG_RES("%s(%x) %d -> %lx hint %x/%x\n", | |
682 | __FUNCTION__, size, pages_needed, pide, | |
683 | (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map), | |
684 | ioc->res_bitshift ); | |
685 | ||
1da177e4 LT |
686 | return (pide); |
687 | } | |
688 | ||
689 | ||
690 | /** | |
691 | * sba_free_range - unmark bits in IO PDIR resource bitmap | |
692 | * @ioc: IO MMU structure which owns the pdir we are interested in. | |
693 | * @iova: IO virtual address which was previously allocated. | |
694 | * @size: number of bytes to create a mapping for | |
695 | * | |
696 | * clear bits in the ioc's resource map | |
697 | */ | |
698 | static SBA_INLINE void | |
699 | sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size) | |
700 | { | |
701 | unsigned long iovp = SBA_IOVP(ioc, iova); | |
702 | unsigned int pide = PDIR_INDEX(iovp); | |
703 | unsigned int ridx = pide >> 3; /* convert bit to byte address */ | |
704 | unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]); | |
705 | int bits_not_wanted = size >> iovp_shift; | |
706 | unsigned long m; | |
707 | ||
708 | /* Round up to power-of-two size: see AR2305 note above */ | |
709 | bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift); | |
710 | for (; bits_not_wanted > 0 ; res_ptr++) { | |
711 | ||
712 | if (unlikely(bits_not_wanted > BITS_PER_LONG)) { | |
713 | ||
714 | /* these mappings start 64bit aligned */ | |
715 | *res_ptr = 0UL; | |
716 | bits_not_wanted -= BITS_PER_LONG; | |
717 | pide += BITS_PER_LONG; | |
718 | ||
719 | } else { | |
720 | ||
721 | /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */ | |
722 | m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1)); | |
723 | bits_not_wanted = 0; | |
724 | ||
725 | DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size, | |
726 | bits_not_wanted, m, pide, res_ptr, *res_ptr); | |
727 | ||
728 | ASSERT(m != 0); | |
729 | ASSERT(bits_not_wanted); | |
730 | ASSERT((*res_ptr & m) == m); /* verify same bits are set */ | |
731 | *res_ptr &= ~m; | |
732 | } | |
733 | } | |
734 | } | |
735 | ||
736 | ||
737 | /************************************************************** | |
738 | * | |
739 | * "Dynamic DMA Mapping" support (aka "Coherent I/O") | |
740 | * | |
741 | ***************************************************************/ | |
742 | ||
743 | /** | |
744 | * sba_io_pdir_entry - fill in one IO PDIR entry | |
745 | * @pdir_ptr: pointer to IO PDIR entry | |
746 | * @vba: Virtual CPU address of buffer to map | |
747 | * | |
748 | * SBA Mapping Routine | |
749 | * | |
750 | * Given a virtual address (vba, arg1) sba_io_pdir_entry() | |
751 | * loads the I/O PDIR entry pointed to by pdir_ptr (arg0). | |
752 | * Each IO Pdir entry consists of 8 bytes as shown below | |
753 | * (LSB == bit 0): | |
754 | * | |
755 | * 63 40 11 7 0 | |
756 | * +-+---------------------+----------------------------------+----+--------+ | |
757 | * |V| U | PPN[39:12] | U | FF | | |
758 | * +-+---------------------+----------------------------------+----+--------+ | |
759 | * | |
760 | * V == Valid Bit | |
761 | * U == Unused | |
762 | * PPN == Physical Page Number | |
763 | * | |
764 | * The physical address fields are filled with the results of virt_to_phys() | |
765 | * on the vba. | |
766 | */ | |
767 | ||
768 | #if 1 | |
769 | #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \ | |
770 | | 0x8000000000000000ULL) | |
771 | #else | |
772 | void SBA_INLINE | |
773 | sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba) | |
774 | { | |
775 | *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL); | |
776 | } | |
777 | #endif | |
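| /* | |
| ** Note (added): in the macro above, ~0xE000000000000FFFULL clears the top three | |
| ** region bits (63:61) of the identity-mapped kernel address -- effectively the | |
| ** virt_to_phys() mentioned earlier -- plus the low 12 offset bits, and | |
| ** OR'ing 0x8000000000000000ULL sets the Valid bit (bit 63). | |
| */ | |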
778 | ||
779 | #ifdef ENABLE_MARK_CLEAN | |
780 | /** | |
781 | * Since DMA is i-cache coherent, any (complete) pages that were written via | |
782 | * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to | |
783 | * flush them when they get mapped into an executable vm-area. | |
784 | */ | |
785 | static void | |
786 | mark_clean (void *addr, size_t size) | |
787 | { | |
788 | unsigned long pg_addr, end; | |
789 | ||
790 | pg_addr = PAGE_ALIGN((unsigned long) addr); | |
791 | end = (unsigned long) addr + size; | |
792 | while (pg_addr + PAGE_SIZE <= end) { | |
793 | struct page *page = virt_to_page((void *)pg_addr); | |
794 | set_bit(PG_arch_1, &page->flags); | |
795 | pg_addr += PAGE_SIZE; | |
796 | } | |
797 | } | |
798 | #endif | |
799 | ||
800 | /** | |
801 | * sba_mark_invalid - invalidate one or more IO PDIR entries | |
802 | * @ioc: IO MMU structure which owns the pdir we are interested in. | |
803 | * @iova: IO Virtual Address mapped earlier | |
804 | * @byte_cnt: number of bytes this mapping covers. | |
805 | * | |
806 | * Marking the IO PDIR entry(ies) as Invalid and invalidate | |
807 | * corresponding IO TLB entry. The PCOM (Purge Command Register) | |
808 | * is to purge stale entries in the IO TLB when unmapping entries. | |
809 | * | |
810 | * The PCOM register supports purging of multiple pages, with a minimum | |
811 | * of 1 page and a maximum of 2GB. Hardware requires the address be | |
812 | * aligned to the size of the range being purged. The size of the range | |
813 | * must be a power of 2. The "Cool perf optimization" in the | |
814 | * allocation routine helps keep that true. | |
815 | */ | |
816 | static SBA_INLINE void | |
817 | sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) | |
818 | { | |
819 | u32 iovp = (u32) SBA_IOVP(ioc,iova); | |
820 | ||
821 | int off = PDIR_INDEX(iovp); | |
822 | ||
823 | /* Must be non-zero and rounded up */ | |
824 | ASSERT(byte_cnt > 0); | |
825 | ASSERT(0 == (byte_cnt & ~iovp_mask)); | |
826 | ||
827 | #ifdef ASSERT_PDIR_SANITY | |
828 | /* Assert first pdir entry is set */ | |
829 | if (!(ioc->pdir_base[off] >> 60)) { | |
830 | sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp)); | |
831 | } | |
832 | #endif | |
833 | ||
834 | if (byte_cnt <= iovp_size) | |
835 | { | |
836 | ASSERT(off < ioc->pdir_size); | |
837 | ||
838 | iovp |= iovp_shift; /* set "size" field for PCOM */ | |
839 | ||
840 | #ifndef FULL_VALID_PDIR | |
841 | /* | |
842 | ** clear I/O PDIR entry "valid" bit | |
843 | ** Do NOT clear the rest - save it for debugging. | |
844 | ** We should only clear bits that have previously | |
845 | ** been enabled. | |
846 | */ | |
847 | ioc->pdir_base[off] &= ~(0x80000000000000FFULL); | |
848 | #else | |
849 | /* | |
850 | ** If we want to maintain the PDIR as valid, put in | |
851 | ** the spill page so devices prefetching won't | |
852 | ** cause a hard fail. | |
853 | */ | |
854 | ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); | |
855 | #endif | |
856 | } else { | |
857 | u32 t = get_iovp_order(byte_cnt) + iovp_shift; | |
858 | ||
859 | iovp |= t; | |
860 | ASSERT(t <= 31); /* 2GB! Max value of "size" field */ | |
861 | ||
862 | do { | |
863 | /* verify this pdir entry is enabled */ | |
864 | ASSERT(ioc->pdir_base[off] >> 63); | |
865 | #ifndef FULL_VALID_PDIR | |
866 | /* clear I/O Pdir entry "valid" bit first */ | |
867 | ioc->pdir_base[off] &= ~(0x80000000000000FFULL); | |
868 | #else | |
869 | ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page); | |
870 | #endif | |
871 | off++; | |
872 | byte_cnt -= iovp_size; | |
873 | } while (byte_cnt > 0); | |
874 | } | |
875 | ||
876 | WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM); | |
877 | } | |
878 | ||
879 | /** | |
880 | * sba_map_single - map one buffer and return IOVA for DMA | |
881 | * @dev: instance of PCI owned by the driver that's asking. | |
882 | * @addr: driver buffer to map. | |
883 | * @size: number of bytes to map in driver buffer. | |
884 | * @dir: R/W or both. | |
885 | * | |
886 | * See Documentation/DMA-mapping.txt | |
887 | */ | |
888 | dma_addr_t | |
889 | sba_map_single(struct device *dev, void *addr, size_t size, int dir) | |
890 | { | |
891 | struct ioc *ioc; | |
892 | dma_addr_t iovp; | |
893 | dma_addr_t offset; | |
894 | u64 *pdir_start; | |
895 | int pide; | |
896 | #ifdef ASSERT_PDIR_SANITY | |
897 | unsigned long flags; | |
898 | #endif | |
899 | #ifdef ALLOW_IOV_BYPASS | |
900 | unsigned long pci_addr = virt_to_phys(addr); | |
901 | #endif | |
902 | ||
903 | #ifdef ALLOW_IOV_BYPASS | |
904 | ASSERT(to_pci_dev(dev)->dma_mask); | |
905 | /* | |
906 | ** Check if the PCI device can DMA to ptr... if so, just return ptr | |
907 | */ | |
908 | if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) { | |
909 | /* | |
910 | ** Device is capable of DMA'ing to the buffer... | |
911 | ** just return the PCI address of ptr | |
912 | */ | |
913 | DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n", | |
914 | to_pci_dev(dev)->dma_mask, pci_addr); | |
915 | return pci_addr; | |
916 | } | |
917 | #endif | |
918 | ioc = GET_IOC(dev); | |
919 | ASSERT(ioc); | |
920 | ||
921 | prefetch(ioc->res_hint); | |
922 | ||
923 | ASSERT(size > 0); | |
924 | ASSERT(size <= DMA_CHUNK_SIZE); | |
925 | ||
926 | /* save offset bits */ | |
927 | offset = ((dma_addr_t) (long) addr) & ~iovp_mask; | |
928 | ||
929 | /* round up to nearest iovp_size */ | |
930 | size = (size + offset + ~iovp_mask) & iovp_mask; | |
931 | ||
932 | #ifdef ASSERT_PDIR_SANITY | |
933 | spin_lock_irqsave(&ioc->res_lock, flags); | |
934 | if (sba_check_pdir(ioc,"Check before sba_map_single()")) | |
935 | panic("Sanity check failed"); | |
936 | spin_unlock_irqrestore(&ioc->res_lock, flags); | |
937 | #endif | |
938 | ||
939 | pide = sba_alloc_range(ioc, size); | |
940 | ||
941 | iovp = (dma_addr_t) pide << iovp_shift; | |
942 | ||
943 | DBG_RUN("%s() 0x%p -> 0x%lx\n", | |
944 | __FUNCTION__, addr, (long) iovp | offset); | |
945 | ||
946 | pdir_start = &(ioc->pdir_base[pide]); | |
947 | ||
948 | while (size > 0) { | |
949 | ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */ | |
950 | sba_io_pdir_entry(pdir_start, (unsigned long) addr); | |
951 | ||
952 | DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start); | |
953 | ||
954 | addr += iovp_size; | |
955 | size -= iovp_size; | |
956 | pdir_start++; | |
957 | } | |
958 | /* force pdir update */ | |
959 | wmb(); | |
960 | ||
961 | /* form complete address */ | |
962 | #ifdef ASSERT_PDIR_SANITY | |
963 | spin_lock_irqsave(&ioc->res_lock, flags); | |
964 | sba_check_pdir(ioc,"Check after sba_map_single()"); | |
965 | spin_unlock_irqrestore(&ioc->res_lock, flags); | |
966 | #endif | |
967 | return SBA_IOVA(ioc, iovp, offset); | |
968 | } | |
969 | ||
5f6602a1 AW |
970 | #ifdef ENABLE_MARK_CLEAN |
971 | static SBA_INLINE void | |
972 | sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) | |
973 | { | |
974 | u32 iovp = (u32) SBA_IOVP(ioc,iova); | |
975 | int off = PDIR_INDEX(iovp); | |
976 | void *addr; | |
977 | ||
978 | if (size <= iovp_size) { | |
979 | addr = phys_to_virt(ioc->pdir_base[off] & | |
980 | ~0xE000000000000FFFULL); | |
981 | mark_clean(addr, size); | |
982 | } else { | |
983 | do { | |
984 | addr = phys_to_virt(ioc->pdir_base[off] & | |
985 | ~0xE000000000000FFFULL); | |
986 | mark_clean(addr, min(size, iovp_size)); | |
987 | off++; | |
988 | size -= iovp_size; | |
989 | } while (size > 0); | |
990 | } | |
991 | } | |
992 | #endif | |
993 | ||
1da177e4 LT |
994 | /** |
995 | * sba_unmap_single - unmap one IOVA and free resources | |
996 | * @dev: instance of PCI owned by the driver that's asking. | |
997 | * @iova: IOVA of driver buffer previously mapped. | |
998 | * @size: number of bytes mapped in driver buffer. | |
999 | * @dir: R/W or both. | |
1000 | * | |
1001 | * See Documentation/DMA-mapping.txt | |
1002 | */ | |
1003 | void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) | |
1004 | { | |
1005 | struct ioc *ioc; | |
1006 | #if DELAYED_RESOURCE_CNT > 0 | |
1007 | struct sba_dma_pair *d; | |
1008 | #endif | |
1009 | unsigned long flags; | |
1010 | dma_addr_t offset; | |
1011 | ||
1012 | ioc = GET_IOC(dev); | |
1013 | ASSERT(ioc); | |
1014 | ||
1015 | #ifdef ALLOW_IOV_BYPASS | |
1016 | if (likely((iova & ioc->imask) != ioc->ibase)) { | |
1017 | /* | |
1018 | ** Address does not fall w/in IOVA, must be bypassing | |
1019 | */ | |
1020 | DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova); | |
1021 | ||
1022 | #ifdef ENABLE_MARK_CLEAN | |
1023 | if (dir == DMA_FROM_DEVICE) { | |
1024 | mark_clean(phys_to_virt(iova), size); | |
1025 | } | |
1026 | #endif | |
1027 | return; | |
1028 | } | |
1029 | #endif | |
1030 | offset = iova & ~iovp_mask; | |
1031 | ||
1032 | DBG_RUN("%s() iovp 0x%lx/%x\n", | |
1033 | __FUNCTION__, (long) iova, size); | |
1034 | ||
1035 | iova ^= offset; /* clear offset bits */ | |
1036 | size += offset; | |
1037 | size = ROUNDUP(size, iovp_size); | |
1038 | ||
5f6602a1 AW |
1039 | #ifdef ENABLE_MARK_CLEAN |
1040 | if (dir == DMA_FROM_DEVICE) | |
1041 | sba_mark_clean(ioc, iova, size); | |
1042 | #endif | |
1da177e4 LT |
1043 | |
1044 | #if DELAYED_RESOURCE_CNT > 0 | |
1045 | spin_lock_irqsave(&ioc->saved_lock, flags); | |
1046 | d = &(ioc->saved[ioc->saved_cnt]); | |
1047 | d->iova = iova; | |
1048 | d->size = size; | |
1049 | if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) { | |
1050 | int cnt = ioc->saved_cnt; | |
1051 | spin_lock(&ioc->res_lock); | |
1052 | while (cnt--) { | |
1053 | sba_mark_invalid(ioc, d->iova, d->size); | |
1054 | sba_free_range(ioc, d->iova, d->size); | |
1055 | d--; | |
1056 | } | |
1057 | ioc->saved_cnt = 0; | |
1058 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | |
1059 | spin_unlock(&ioc->res_lock); | |
1060 | } | |
1061 | spin_unlock_irqrestore(&ioc->saved_lock, flags); | |
1062 | #else /* DELAYED_RESOURCE_CNT == 0 */ | |
1063 | spin_lock_irqsave(&ioc->res_lock, flags); | |
1064 | sba_mark_invalid(ioc, iova, size); | |
1065 | sba_free_range(ioc, iova, size); | |
1066 | READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */ | |
1067 | spin_unlock_irqrestore(&ioc->res_lock, flags); | |
1068 | #endif /* DELAYED_RESOURCE_CNT == 0 */ | |
1da177e4 LT |
1069 | } |
1070 | ||
1071 | ||
1072 | /** | |
1073 | * sba_alloc_coherent - allocate/map shared mem for DMA | |
1074 | * @dev: instance of PCI owned by the driver that's asking. | |
1075 | * @size: number of bytes mapped in driver buffer. | |
1076 | * @dma_handle: IOVA of new buffer. | |
1077 | * | |
1078 | * See Documentation/DMA-mapping.txt | |
1079 | */ | |
1080 | void * | |
06a54497 | 1081 | sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags) |
1da177e4 LT |
1082 | { |
1083 | struct ioc *ioc; | |
1084 | void *addr; | |
1085 | ||
1086 | ioc = GET_IOC(dev); | |
1087 | ASSERT(ioc); | |
1088 | ||
1089 | #ifdef CONFIG_NUMA | |
1090 | { | |
1091 | struct page *page; | |
1092 | page = alloc_pages_node(ioc->node == MAX_NUMNODES ? | |
1093 | numa_node_id() : ioc->node, flags, | |
1094 | get_order(size)); | |
1095 | ||
1096 | if (unlikely(!page)) | |
1097 | return NULL; | |
1098 | ||
1099 | addr = page_address(page); | |
1100 | } | |
1101 | #else | |
1102 | addr = (void *) __get_free_pages(flags, get_order(size)); | |
1103 | #endif | |
1104 | if (unlikely(!addr)) | |
1105 | return NULL; | |
1106 | ||
1107 | memset(addr, 0, size); | |
1108 | *dma_handle = virt_to_phys(addr); | |
1109 | ||
1110 | #ifdef ALLOW_IOV_BYPASS | |
1111 | ASSERT(dev->coherent_dma_mask); | |
1112 | /* | |
1113 | ** Check if the PCI device can DMA to ptr... if so, just return ptr | |
1114 | */ | |
1115 | if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) { | |
1116 | DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n", | |
1117 | dev->coherent_dma_mask, *dma_handle); | |
1118 | ||
1119 | return addr; | |
1120 | } | |
1121 | #endif | |
1122 | ||
1123 | /* | |
1124 | * If device can't bypass or bypass is disabled, pass the 32bit fake | |
1125 | * device to map single to get an iova mapping. | |
1126 | */ | |
1127 | *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0); | |
1128 | ||
1129 | return addr; | |
1130 | } | |
1131 | ||
1132 | ||
1133 | /** | |
1134 | * sba_free_coherent - free/unmap shared mem for DMA | |
1135 | * @dev: instance of PCI owned by the driver that's asking. | |
1136 | * @size: number of bytes mapped in driver buffer. | |
1137 | * @vaddr: CPU virtual address of "consistent" buffer. | |
1138 | * @dma_handle: IO virtual address of "consistent" buffer. | |
1139 | * | |
1140 | * See Documentation/DMA-mapping.txt | |
1141 | */ | |
1142 | void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) | |
1143 | { | |
1144 | sba_unmap_single(dev, dma_handle, size, 0); | |
1145 | free_pages((unsigned long) vaddr, get_order(size)); | |
1146 | } | |
1147 | ||
1148 | ||
1149 | /* | |
1150 | ** Since 0 is a valid pdir_base index value, can't use that | |
1151 | ** to determine if a value is valid or not. Use a flag to indicate | |
1152 | ** the SG list entry contains a valid pdir index. | |
1153 | */ | |
1154 | #define PIDE_FLAG 0x1UL | |
1155 | ||
1156 | #ifdef DEBUG_LARGE_SG_ENTRIES | |
1157 | int dump_run_sg = 0; | |
1158 | #endif | |
1159 | ||
1160 | ||
1161 | /** | |
1162 | * sba_fill_pdir - write allocated SG entries into IO PDIR | |
1163 | * @ioc: IO MMU structure which owns the pdir we are interested in. | |
1164 | * @startsg: list of IOVA/size pairs | |
1165 | * @nents: number of entries in startsg list | |
1166 | * | |
1167 | * Take preprocessed SG list and write corresponding entries | |
1168 | * in the IO PDIR. | |
1169 | */ | |
1170 | ||
1171 | static SBA_INLINE int | |
1172 | sba_fill_pdir( | |
1173 | struct ioc *ioc, | |
1174 | struct scatterlist *startsg, | |
1175 | int nents) | |
1176 | { | |
1177 | struct scatterlist *dma_sg = startsg; /* pointer to current DMA */ | |
1178 | int n_mappings = 0; | |
1179 | u64 *pdirp = NULL; | |
1180 | unsigned long dma_offset = 0; | |
1181 | ||
1182 | dma_sg--; | |
1183 | while (nents-- > 0) { | |
1184 | int cnt = startsg->dma_length; | |
1185 | startsg->dma_length = 0; | |
1186 | ||
1187 | #ifdef DEBUG_LARGE_SG_ENTRIES | |
1188 | if (dump_run_sg) | |
1189 | printk(" %2d : %08lx/%05x %p\n", | |
1190 | nents, startsg->dma_address, cnt, | |
1191 | sba_sg_address(startsg)); | |
1192 | #else | |
1193 | DBG_RUN_SG(" %d : %08lx/%05x %p\n", | |
1194 | nents, startsg->dma_address, cnt, | |
1195 | sba_sg_address(startsg)); | |
1196 | #endif | |
1197 | /* | |
1198 | ** Look for the start of a new DMA stream | |
1199 | */ | |
1200 | if (startsg->dma_address & PIDE_FLAG) { | |
1201 | u32 pide = startsg->dma_address & ~PIDE_FLAG; | |
1202 | dma_offset = (unsigned long) pide & ~iovp_mask; | |
1203 | startsg->dma_address = 0; | |
9b6eccfc | 1204 | dma_sg = sg_next(dma_sg); |
1da177e4 LT |
1205 | dma_sg->dma_address = pide | ioc->ibase; |
1206 | pdirp = &(ioc->pdir_base[pide >> iovp_shift]); | |
1207 | n_mappings++; | |
1208 | } | |
1209 | ||
1210 | /* | |
1211 | ** Look for a VCONTIG chunk | |
1212 | */ | |
1213 | if (cnt) { | |
1214 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); | |
1215 | ASSERT(pdirp); | |
1216 | ||
1217 | /* Since multiple Vcontig blocks could make up | |
1218 | ** one DMA stream, *add* cnt to dma_len. | |
1219 | */ | |
1220 | dma_sg->dma_length += cnt; | |
1221 | cnt += dma_offset; | |
1222 | dma_offset=0; /* only want offset on first chunk */ | |
1223 | cnt = ROUNDUP(cnt, iovp_size); | |
1224 | do { | |
1225 | sba_io_pdir_entry(pdirp, vaddr); | |
1226 | vaddr += iovp_size; | |
1227 | cnt -= iovp_size; | |
1228 | pdirp++; | |
1229 | } while (cnt > 0); | |
1230 | } | |
9b6eccfc | 1231 | startsg = sg_next(startsg); |
1da177e4 LT |
1232 | } |
1233 | /* force pdir update */ | |
1234 | wmb(); | |
1235 | ||
1236 | #ifdef DEBUG_LARGE_SG_ENTRIES | |
1237 | dump_run_sg = 0; | |
1238 | #endif | |
1239 | return(n_mappings); | |
1240 | } | |
1241 | ||
1242 | ||
1243 | /* | |
1244 | ** Two address ranges are DMA contiguous *iff* "end of prev" and | |
1245 | ** "start of next" are both on an IOV page boundary. | |
1246 | ** | |
1247 | ** (shift left is a quick trick to mask off upper bits) | |
1248 | */ | |
1249 | #define DMA_CONTIG(__X, __Y) \ | |
1250 | (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL) | |
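| /* | |
| ** Note (added): the shifted OR is zero exactly when the low iovp_shift bits of | |
| ** both addresses are zero, i.e. when both "end of prev" and "start of next" | |
| ** are aligned to iovp_size. | |
| */ | |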
1251 | ||
1252 | ||
1253 | /** | |
1254 | * sba_coalesce_chunks - preprocess the SG list | |
1255 | * @ioc: IO MMU structure which owns the pdir we are interested in. | |
1256 | * @startsg: list of IOVA/size pairs | |
1257 | * @nents: number of entries in startsg list | |
1258 | * | |
1259 | * First pass is to walk the SG list and determine where the breaks are | |
1260 | * in the DMA stream. Allocates PDIR entries but does not fill them. | |
1261 | * Returns the number of DMA chunks. | |
1262 | * | |
1263 | * Doing the fill separately from the coalescing/allocation keeps the | |
1264 | * code simpler. Future enhancement could make one pass through | |
1265 | * the sglist do both. | |
1266 | */ | |
1267 | static SBA_INLINE int | |
1268 | sba_coalesce_chunks( struct ioc *ioc, | |
1269 | struct scatterlist *startsg, | |
1270 | int nents) | |
1271 | { | |
1272 | struct scatterlist *vcontig_sg; /* VCONTIG chunk head */ | |
1273 | unsigned long vcontig_len; /* len of VCONTIG chunk */ | |
1274 | unsigned long vcontig_end; | |
1275 | struct scatterlist *dma_sg; /* next DMA stream head */ | |
1276 | unsigned long dma_offset, dma_len; /* start/len of DMA stream */ | |
1277 | int n_mappings = 0; | |
1278 | ||
1279 | while (nents > 0) { | |
1280 | unsigned long vaddr = (unsigned long) sba_sg_address(startsg); | |
1281 | ||
1282 | /* | |
1283 | ** Prepare for first/next DMA stream | |
1284 | */ | |
1285 | dma_sg = vcontig_sg = startsg; | |
1286 | dma_len = vcontig_len = vcontig_end = startsg->length; | |
1287 | vcontig_end += vaddr; | |
1288 | dma_offset = vaddr & ~iovp_mask; | |
1289 | ||
1290 | /* PARANOID: clear entries */ | |
1291 | startsg->dma_address = startsg->dma_length = 0; | |
1292 | ||
1293 | /* | |
1294 | ** This loop terminates one iteration "early" since | |
1295 | ** it's always looking one "ahead". | |
1296 | */ | |
1297 | while (--nents > 0) { | |
1298 | unsigned long vaddr; /* tmp */ | |
1299 | ||
9b6eccfc | 1300 | startsg = sg_next(startsg); |
1da177e4 LT |
1301 | |
1302 | /* PARANOID */ | |
1303 | startsg->dma_address = startsg->dma_length = 0; | |
1304 | ||
1305 | /* catch brokenness in SCSI layer */ | |
1306 | ASSERT(startsg->length <= DMA_CHUNK_SIZE); | |
1307 | ||
1308 | /* | |
1309 | ** First make sure current dma stream won't | |
1310 | ** exceed DMA_CHUNK_SIZE if we coalesce the | |
1311 | ** next entry. | |
1312 | */ | |
1313 | if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask) | |
1314 | > DMA_CHUNK_SIZE) | |
1315 | break; | |
1316 | ||
1317 | /* | |
1318 | ** Then look for virtually contiguous blocks. | |
1319 | ** | |
1320 | ** append the next transaction? | |
1321 | */ | |
1322 | vaddr = (unsigned long) sba_sg_address(startsg); | |
1323 | if (vcontig_end == vaddr) | |
1324 | { | |
1325 | vcontig_len += startsg->length; | |
1326 | vcontig_end += startsg->length; | |
1327 | dma_len += startsg->length; | |
1328 | continue; | |
1329 | } | |
1330 | ||
1331 | #ifdef DEBUG_LARGE_SG_ENTRIES | |
1332 | dump_run_sg = (vcontig_len > iovp_size); | |
1333 | #endif | |
1334 | ||
1335 | /* | |
1336 | ** Not virtually contiguous. | |
1337 | ** Terminate prev chunk. | |
1338 | ** Start a new chunk. | |
1339 | ** | |
1340 | ** Once we start a new VCONTIG chunk, dma_offset | |
1341 | ** can't change. And we need the offset from the first | |
1342 | ** chunk - not the last one. Ergo successive chunks | |
1343 | ** must start on page boundaries and dovetail | |
1344 | ** with their predecessors. | |
1345 | */ | |
1346 | vcontig_sg->dma_length = vcontig_len; | |
1347 | ||
1348 | vcontig_sg = startsg; | |
1349 | vcontig_len = startsg->length; | |
1350 | ||
1351 | /* | |
1352 | ** 3) do the entries end/start on page boundaries? | |
1353 | ** Don't update vcontig_end until we've checked. | |
1354 | */ | |
1355 | if (DMA_CONTIG(vcontig_end, vaddr)) | |
1356 | { | |
1357 | vcontig_end = vcontig_len + vaddr; | |
1358 | dma_len += vcontig_len; | |
1359 | continue; | |
1360 | } else { | |
1361 | break; | |
1362 | } | |
1363 | } | |
1364 | ||
1365 | /* | |
1366 | ** End of DMA Stream | |
1367 | ** Terminate last VCONTIG block. | |
1368 | ** Allocate space for DMA stream. | |
1369 | */ | |
1370 | vcontig_sg->dma_length = vcontig_len; | |
1371 | dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask; | |
1372 | ASSERT(dma_len <= DMA_CHUNK_SIZE); | |
1373 | dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG | |
1374 | | (sba_alloc_range(ioc, dma_len) << iovp_shift) | |
1375 | | dma_offset); | |
1376 | n_mappings++; | |
1377 | } | |
1378 | ||
1379 | return n_mappings; | |
1380 | } | |
1381 | ||
1382 | ||
1383 | /** | |
1384 | * sba_map_sg - map Scatter/Gather list | |
1385 | * @dev: instance of PCI owned by the driver that's asking. | |
1386 | * @sglist: array of buffer/length pairs | |
1387 | * @nents: number of entries in list | |
1388 | * @dir: R/W or both. | |
1389 | * | |
1390 | * See Documentation/DMA-mapping.txt | |
1391 | */ | |
1392 | int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir) | |
1393 | { | |
1394 | struct ioc *ioc; | |
1395 | int coalesced, filled = 0; | |
1396 | #ifdef ASSERT_PDIR_SANITY | |
1397 | unsigned long flags; | |
1398 | #endif | |
1399 | #ifdef ALLOW_IOV_BYPASS_SG | |
1400 | struct scatterlist *sg; | |
1401 | #endif | |
1402 | ||
1403 | DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents); | |
1404 | ioc = GET_IOC(dev); | |
1405 | ASSERT(ioc); | |
1406 | ||
1407 | #ifdef ALLOW_IOV_BYPASS_SG | |
1408 | ASSERT(to_pci_dev(dev)->dma_mask); | |
1409 | if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) { | |
9b6eccfc | 1410 | for_each_sg(sglist, sg, nents, filled) { |
1da177e4 LT |
1411 | sg->dma_length = sg->length; |
1412 | sg->dma_address = virt_to_phys(sba_sg_address(sg)); | |
1413 | } | |
1414 | return filled; | |
1415 | } | |
1416 | #endif | |
1417 | /* Fast path single entry scatterlists. */ | |
1418 | if (nents == 1) { | |
1419 | sglist->dma_length = sglist->length; | |
1420 | sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir); | |
1421 | return 1; | |
1422 | } | |
1423 | ||
1424 | #ifdef ASSERT_PDIR_SANITY | |
1425 | spin_lock_irqsave(&ioc->res_lock, flags); | |
1426 | if (sba_check_pdir(ioc,"Check before sba_map_sg()")) | |
1427 | { | |
1428 | sba_dump_sg(ioc, sglist, nents); | |
1429 | panic("Check before sba_map_sg()"); | |
1430 | } | |
1431 | spin_unlock_irqrestore(&ioc->res_lock, flags); | |
1432 | #endif | |
1433 | ||
1434 | prefetch(ioc->res_hint); | |
1435 | ||
1436 | /* | |
1437 | ** First coalesce the chunks and allocate I/O pdir space | |
1438 | ** | |
1439 | ** If this is one DMA stream, we can properly map using the | |
1440 | ** correct virtual address associated with each DMA page. | |
1441 | ** w/o this association, we wouldn't have coherent DMA! | |
1442 | ** Access to the virtual address is what forces a two pass algorithm. | |
1443 | */ | |
1444 | coalesced = sba_coalesce_chunks(ioc, sglist, nents); | |
1445 | ||
1446 | /* | |
1447 | ** Program the I/O Pdir | |
1448 | ** | |
1449 | ** map the virtual addresses to the I/O Pdir | |
1450 | ** o dma_address will contain the pdir index | |
1451 | ** o dma_len will contain the number of bytes to map | |
1452 | ** o address contains the virtual address. | |
1453 | */ | |
1454 | filled = sba_fill_pdir(ioc, sglist, nents); | |
1455 | ||
1456 | #ifdef ASSERT_PDIR_SANITY | |
1457 | spin_lock_irqsave(&ioc->res_lock, flags); | |
1458 | if (sba_check_pdir(ioc,"Check after sba_map_sg()")) | |
1459 | { | |
1460 | sba_dump_sg(ioc, sglist, nents); | |
1461 | panic("Check after sba_map_sg()\n"); | |
1462 | } | |
1463 | spin_unlock_irqrestore(&ioc->res_lock, flags); | |
1464 | #endif | |
1465 | ||
1466 | ASSERT(coalesced == filled); | |
1467 | DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled); | |
1468 | ||
1469 | return filled; | |
1470 | } | |
1471 | ||
1472 | ||
1473 | /** | |
1474 | * sba_unmap_sg - unmap Scatter/Gather list | |
1475 | * @dev: instance of PCI owned by the driver that's asking. | |
1476 | * @sglist: array of buffer/length pairs | |
1477 | * @nents: number of entries in list | |
1478 | * @dir: R/W or both. | |
1479 | * | |
1480 | * See Documentation/DMA-mapping.txt | |
1481 | */ | |
1482 | void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | |
1483 | { | |
1484 | #ifdef ASSERT_PDIR_SANITY | |
1485 | struct ioc *ioc; | |
1486 | unsigned long flags; | |
1487 | #endif | |
1488 | ||
1489 | DBG_RUN_SG("%s() START %d entries, %p,%x\n", | |
1490 | __FUNCTION__, nents, sba_sg_address(sglist), sglist->length); | |
1491 | ||
1492 | #ifdef ASSERT_PDIR_SANITY | |
1493 | ioc = GET_IOC(dev); | |
1494 | ASSERT(ioc); | |
1495 | ||
1496 | spin_lock_irqsave(&ioc->res_lock, flags); | |
1497 | sba_check_pdir(ioc,"Check before sba_unmap_sg()"); | |
1498 | spin_unlock_irqrestore(&ioc->res_lock, flags); | |
1499 | #endif | |
1500 | ||
1501 | while (nents && sglist->dma_length) { | |
1502 | ||
1503 | sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir); | |
9b6eccfc | 1504 | sglist = sg_next(sglist); |
1da177e4 LT |
1505 | nents--; |
1506 | } | |
1507 | ||
1508 | DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents); | |
1509 | ||
1510 | #ifdef ASSERT_PDIR_SANITY | |
1511 | spin_lock_irqsave(&ioc->res_lock, flags); | |
1512 | sba_check_pdir(ioc,"Check after sba_unmap_sg()"); | |
1513 | spin_unlock_irqrestore(&ioc->res_lock, flags); | |
1514 | #endif | |
1515 | ||
1516 | } | |
1517 | ||
1518 | /************************************************************** | |
1519 | * | |
1520 | * Initialization and claim | |
1521 | * | |
1522 | ***************************************************************/ | |
1523 | ||
1524 | static void __init | |
1525 | ioc_iova_init(struct ioc *ioc) | |
1526 | { | |
1527 | int tcnfg; | |
1528 | int agp_found = 0; | |
1529 | struct pci_dev *device = NULL; | |
1530 | #ifdef FULL_VALID_PDIR | |
1531 | unsigned long index; | |
1532 | #endif | |
1533 | ||
1534 | /* | |
1535 | ** Firmware programs the base and size of a "safe IOVA space" | |
1536 | ** (one that doesn't overlap memory or LMMIO space) in the | |
1537 | ** IBASE and IMASK registers. | |
1538 | */ | |
1539 | ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL; | |
1540 | ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL; | |
1541 | ||
1542 | ioc->iov_size = ~ioc->imask + 1; | |
1543 | ||
1544 | DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n", | |
1545 | __FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask, | |
1546 | ioc->iov_size >> 20); | |
1547 | ||
1548 | switch (iovp_size) { | |
1549 | case 4*1024: tcnfg = 0; break; | |
1550 | case 8*1024: tcnfg = 1; break; | |
1551 | case 16*1024: tcnfg = 2; break; | |
1552 | case 64*1024: tcnfg = 3; break; | |
1553 | default: | |
1554 | panic(PFX "Unsupported IOTLB page size %ldK", | |
1555 | iovp_size >> 10); | |
1556 | break; | |
1557 | } | |
1558 | WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG); | |
1559 | ||
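/*
** One 8-byte pdir entry is needed per IOTLB page.  Continuing the
** illustrative 256MB IOV space above with 4K IOTLB pages, that is
** (256MB / 4K) * 8 = 512KB of pdir.
*/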
1560 | ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE; | |
1561 | ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL, | |
1562 | get_order(ioc->pdir_size)); | |
1563 | if (!ioc->pdir_base) | |
1564 | panic(PFX "Couldn't allocate I/O Page Table\n"); | |
1565 | ||
1566 | memset(ioc->pdir_base, 0, ioc->pdir_size); | |
1567 | ||
1568 | DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__, | |
1569 | iovp_size >> 10, ioc->pdir_base, ioc->pdir_size); | |
1570 | ||
1571 | ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base); | |
1572 | WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE); | |
1573 | ||
1574 | /* | |
1575 | ** If an AGP device is present, only use half of the IOV space | |
1576 | ** for PCI DMA. Unfortunately we can't know ahead of time | |
1577 | ** whether GART support will actually be used, so for now we | |
1578 | ** just key on any AGP device found in the system. | |
1579 | ** We program the first pdir entry past the PCI half with a | |
1580 | ** cookie for the GART code to handshake on. | |
1581 | */ | |
1582 | for_each_pci_dev(device) | |
1583 | agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP); | |
1584 | ||
1585 | if (agp_found && reserve_sba_gart) { | |
1586 | printk(KERN_INFO PFX "reserving %dMB of IOVA space at 0x%lx for agpgart\n", | |
1587 | ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2); | |
1588 | ioc->pdir_size /= 2; | |
1589 | ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE; | |
1590 | } | |
1591 | #ifdef FULL_VALID_PDIR | |
1592 | /* | |
1593 | ** Check to see if the spill page has already been allocated; we don't | |
1594 | ** need more than one across multiple SBAs. | |
1595 | */ | |
1596 | if (!prefetch_spill_page) { | |
1597 | char *spill_poison = "SBAIOMMU POISON"; | |
1598 | int poison_size = 16; | |
1599 | void *poison_addr, *addr; | |
1600 | ||
1601 | addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size)); | |
1602 | if (!addr) | |
1603 | panic(PFX "Couldn't allocate PDIR spill page\n"); | |
1604 | ||
1605 | poison_addr = addr; | |
1606 | for ( ; (u64) poison_addr < (u64) addr + iovp_size; poison_addr += poison_size) | |
1607 | memcpy(poison_addr, spill_poison, poison_size); | |
1608 | ||
1609 | prefetch_spill_page = virt_to_phys(addr); | |
1610 | ||
1611 | DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page); | |
1612 | } | |
1613 | /* | |
1614 | ** Set all the PDIR entries valid w/ the spill page as the target | |
1615 | */ | |
1616 | for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++) | |
1617 | ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page); | |
1618 | #endif | |
1619 | ||
1620 | /* Clear I/O TLB of any possible entries */ | |
1621 | WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM); | |
1622 | READ_REG(ioc->ioc_hpa + IOC_PCOM); | |
1623 | ||
1624 | /* Enable IOVA translation */ | |
1625 | WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE); | |
1626 | READ_REG(ioc->ioc_hpa + IOC_IBASE); | |
1627 | } | |
1628 | ||
1629 | static void __init | |
1630 | ioc_resource_init(struct ioc *ioc) | |
1631 | { | |
1632 | spin_lock_init(&ioc->res_lock); | |
1633 | #if DELAYED_RESOURCE_CNT > 0 | |
1634 | spin_lock_init(&ioc->saved_lock); | |
1635 | #endif | |
1636 | ||
1637 | /* resource map size dictated by pdir_size */ | |
1638 | ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */ | |
1639 | ioc->res_size >>= 3; /* convert bit count to byte count */ | |
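/*
** One bit per pdir entry: continuing the illustrative 512KB pdir above,
** 64K entries need a 64K-bit map, i.e. an 8KB res_map.
*/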
1640 | DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size); | |
1641 | ||
1642 | ioc->res_map = (char *) __get_free_pages(GFP_KERNEL, | |
1643 | get_order(ioc->res_size)); | |
1644 | if (!ioc->res_map) | |
1645 | panic(PFX "Couldn't allocate resource map\n"); | |
1646 | ||
1647 | memset(ioc->res_map, 0, ioc->res_size); | |
1648 | /* next available IOVP - circular search */ | |
1649 | ioc->res_hint = (unsigned long *) ioc->res_map; | |
1650 | ||
1651 | #ifdef ASSERT_PDIR_SANITY | |
1652 | /* Mark first bit busy - ie no IOVA 0 */ | |
1653 | ioc->res_map[0] = 0x1; | |
1654 | ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE; | |
1655 | #endif | |
1656 | #ifdef FULL_VALID_PDIR | |
1657 | /* Mark the last resource used so we don't prefetch beyond IOVA space */ | |
1658 | ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */ | |
1659 | ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF | |
1660 | | prefetch_spill_page); | |
1661 | #endif | |
1662 | ||
1663 | DBG_INIT("%s() res_map %x %p\n", __FUNCTION__, | |
1664 | ioc->res_size, (void *) ioc->res_map); | |
1665 | } | |
1666 | ||
1667 | static void __init | |
1668 | ioc_sac_init(struct ioc *ioc) | |
1669 | { | |
1670 | struct pci_dev *sac = NULL; | |
1671 | struct pci_controller *controller = NULL; | |
1672 | ||
1673 | /* | |
1674 | * pci_alloc_coherent() must return a DMA address which is | |
1675 | * SAC (single address cycle) addressable, so allocate a | |
1676 | * pseudo-device to enforce that. | |
1677 | */ | |
52fd9108 | 1678 | sac = kzalloc(sizeof(*sac), GFP_KERNEL); |
1da177e4 LT |
1679 | if (!sac) |
1680 | panic(PFX "Couldn't allocate struct pci_dev"); | |
1da177e4 | 1681 | |
52fd9108 | 1682 | controller = kzalloc(sizeof(*controller), GFP_KERNEL); |
1da177e4 LT |
1683 | if (!controller) |
1684 | panic(PFX "Couldn't allocate struct pci_controller"); | |
1da177e4 LT |
1685 | |
1686 | controller->iommu = ioc; | |
1687 | sac->sysdata = controller; | |
1688 | sac->dma_mask = 0xFFFFFFFFUL; | |
1689 | #ifdef CONFIG_PCI | |
1690 | sac->dev.bus = &pci_bus_type; | |
1691 | #endif | |
1692 | ioc->sac_only_dev = sac; | |
1693 | } | |
1694 | ||
1695 | static void __init | |
1696 | ioc_zx1_init(struct ioc *ioc) | |
1697 | { | |
1698 | unsigned long rope_config; | |
1699 | unsigned int i; | |
1700 | ||
1701 | if (ioc->rev < 0x20) | |
1702 | panic(PFX "IOC 2.0 or later required for IOMMU support\n"); | |
1703 | ||
1704 | /* 38 bit memory controller + extra bit for range displaced by MMIO */ | |
1705 | ioc->dma_mask = (0x1UL << 39) - 1; | |
1706 | ||
1707 | /* | |
1708 | ** Clear ROPE(N)_CONFIG AO bit. | |
1709 | ** Disables "NT Ordering" (~= !"Relaxed Ordering") | |
1710 | ** Overrides bit 1 in DMA Hint Sets. | |
1711 | ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701. | |
1712 | */ | |
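/* The loop below touches the eight ROPE(N)_CONFIG registers, spaced 8 bytes apart. */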
1713 | for (i=0; i<(8*8); i+=8) { | |
1714 | rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i); | |
1715 | rope_config &= ~IOC_ROPE_AO; | |
1716 | WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i); | |
1717 | } | |
1718 | } | |
1719 | ||
1720 | typedef void (initfunc)(struct ioc *); | |
1721 | ||
1722 | struct ioc_iommu { | |
1723 | u32 func_id; | |
1724 | char *name; | |
1725 | initfunc *init; | |
1726 | }; | |
1727 | ||
1728 | static struct ioc_iommu ioc_iommu_info[] __initdata = { | |
1729 | { ZX1_IOC_ID, "zx1", ioc_zx1_init }, | |
1730 | { ZX2_IOC_ID, "zx2", NULL }, | |
1731 | { SX1000_IOC_ID, "sx1000", NULL }, | |
e15da401 | 1732 | { SX2000_IOC_ID, "sx2000", NULL }, |
1da177e4 LT |
1733 | }; |
1734 | ||
1735 | static struct ioc * __init | |
1736 | ioc_init(u64 hpa, void *handle) | |
1737 | { | |
1738 | struct ioc *ioc; | |
1739 | struct ioc_iommu *info; | |
1740 | ||
52fd9108 | 1741 | ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); |
1da177e4 LT |
1742 | if (!ioc) |
1743 | return NULL; | |
1744 | ||
1da177e4 LT |
1745 | ioc->next = ioc_list; |
1746 | ioc_list = ioc; | |
1747 | ||
1748 | ioc->handle = handle; | |
1749 | ioc->ioc_hpa = ioremap(hpa, 0x1000); | |
1750 | ||
1751 | ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID); | |
1752 | ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL; | |
1753 | ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */ | |
1754 | ||
1755 | for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) { | |
1756 | if (ioc->func_id == info->func_id) { | |
1757 | ioc->name = info->name; | |
1758 | if (info->init) | |
1759 | (info->init)(ioc); | |
1760 | } | |
1761 | } | |
1762 | ||
1763 | iovp_size = (1 << iovp_shift); | |
1764 | iovp_mask = ~(iovp_size - 1); | |
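/* e.g. iovp_shift == 12 -> iovp_size 4K, iovp_mask 0xFFFFFFFFFFFFF000 */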
1765 | ||
1766 | DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__, | |
1767 | PAGE_SIZE >> 10, iovp_size >> 10); | |
1768 | ||
1769 | if (!ioc->name) { | |
1770 | ioc->name = kmalloc(24, GFP_KERNEL); | |
1771 | if (ioc->name) | |
1772 | sprintf((char *) ioc->name, "Unknown (%04x:%04x)", | |
1773 | ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF); | |
1774 | else | |
1775 | ioc->name = "Unknown"; | |
1776 | } | |
1777 | ||
1778 | ioc_iova_init(ioc); | |
1779 | ioc_resource_init(ioc); | |
1780 | ioc_sac_init(ioc); | |
1781 | ||
1782 | if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask) | |
1783 | ia64_max_iommu_merge_mask = ~iovp_mask; | |
1784 | ||
1785 | printk(KERN_INFO PFX | |
1786 | "%s %d.%d HPA 0x%lx IOVA space %dMB at 0x%lx\n", | |
1787 | ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF, | |
1788 | hpa, ioc->iov_size >> 20, ioc->ibase); | |
1789 | ||
1790 | return ioc; | |
1791 | } | |
1792 | ||
1793 | ||
1794 | ||
1795 | /************************************************************************** | |
1796 | ** | |
1797 | ** SBA initialization code (HW and SW) | |
1798 | ** | |
1799 | ** o identify SBA chip itself | |
1800 | ** o FIXME: initialize DMA hints for reasonable defaults | |
1801 | ** | |
1802 | **************************************************************************/ | |
1803 | ||
1804 | #ifdef CONFIG_PROC_FS | |
1805 | static void * | |
1806 | ioc_start(struct seq_file *s, loff_t *pos) | |
1807 | { | |
1808 | struct ioc *ioc; | |
1809 | loff_t n = *pos; | |
1810 | ||
1811 | for (ioc = ioc_list; ioc; ioc = ioc->next) | |
1812 | if (!n--) | |
1813 | return ioc; | |
1814 | ||
1815 | return NULL; | |
1816 | } | |
1817 | ||
1818 | static void * | |
1819 | ioc_next(struct seq_file *s, void *v, loff_t *pos) | |
1820 | { | |
1821 | struct ioc *ioc = v; | |
1822 | ||
1823 | ++*pos; | |
1824 | return ioc->next; | |
1825 | } | |
1826 | ||
1827 | static void | |
1828 | ioc_stop(struct seq_file *s, void *v) | |
1829 | { | |
1830 | } | |
1831 | ||
1832 | static int | |
1833 | ioc_show(struct seq_file *s, void *v) | |
1834 | { | |
1835 | struct ioc *ioc = v; | |
1836 | unsigned long *res_ptr = (unsigned long *)ioc->res_map; | |
1837 | int i, used = 0; | |
1838 | ||
1839 | seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n", | |
1840 | ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF)); | |
1841 | #ifdef CONFIG_NUMA | |
1842 | if (ioc->node != MAX_NUMNODES) | |
1843 | seq_printf(s, "NUMA node : %d\n", ioc->node); | |
1844 | #endif | |
1845 | seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024)); | |
1846 | seq_printf(s, "IOVA page size : %ld KB\n", iovp_size/1024); | |
1847 | ||
1848 | for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr) | |
1849 | used += hweight64(*res_ptr); | |
1850 | ||
1851 | seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3); | |
1852 | seq_printf(s, "PDIR used : %d entries\n", used); | |
1853 | ||
1854 | #ifdef PDIR_SEARCH_TIMING | |
1855 | { | |
1856 | unsigned long i = 0, avg = 0, min, max; | |
1857 | min = max = ioc->avg_search[0]; | |
1858 | for (i = 0; i < SBA_SEARCH_SAMPLE; i++) { | |
1859 | avg += ioc->avg_search[i]; | |
1860 | if (ioc->avg_search[i] > max) max = ioc->avg_search[i]; | |
1861 | if (ioc->avg_search[i] < min) min = ioc->avg_search[i]; | |
1862 | } | |
1863 | avg /= SBA_SEARCH_SAMPLE; | |
1864 | seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n", | |
1865 | min, avg, max); | |
1866 | } | |
1867 | #endif | |
1868 | #ifndef ALLOW_IOV_BYPASS | |
1869 | seq_printf(s, "IOVA bypass disabled\n"); | |
1870 | #endif | |
1871 | return 0; | |
1872 | } | |
1873 | ||
1874 | static struct seq_operations ioc_seq_ops = { | |
1875 | .start = ioc_start, | |
1876 | .next = ioc_next, | |
1877 | .stop = ioc_stop, | |
1878 | .show = ioc_show | |
1879 | }; | |
1880 | ||
1881 | static int | |
1882 | ioc_open(struct inode *inode, struct file *file) | |
1883 | { | |
1884 | return seq_open(file, &ioc_seq_ops); | |
1885 | } | |
1886 | ||
5dfe4c96 | 1887 | static const struct file_operations ioc_fops = { |
1da177e4 LT |
1888 | .open = ioc_open, |
1889 | .read = seq_read, | |
1890 | .llseek = seq_lseek, | |
1891 | .release = seq_release | |
1892 | }; | |
1893 | ||
1894 | static void __init | |
1895 | ioc_proc_init(void) | |
1896 | { | |
1897 | struct proc_dir_entry *dir, *entry; | |
1898 | ||
1899 | dir = proc_mkdir("bus/mckinley", NULL); | |
1900 | if (!dir) | |
1901 | return; | |
1902 | ||
1903 | entry = create_proc_entry(ioc_list->name, 0, dir); | |
1904 | if (entry) | |
1905 | entry->proc_fops = &ioc_fops; | |
1906 | } | |
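/*
** Illustrative: on a system whose first IOC probed as "zx1", the entry
** created above can be read with "cat /proc/bus/mckinley/zx1"; it reports
** the IOC revision, IOVA space and page size, and the pdir/bitmap usage
** gathered by ioc_show().
*/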
1907 | #endif | |
1908 | ||
1909 | static void | |
1910 | sba_connect_bus(struct pci_bus *bus) | |
1911 | { | |
1912 | acpi_handle handle, parent; | |
1913 | acpi_status status; | |
1914 | struct ioc *ioc; | |
1915 | ||
1916 | if (!PCI_CONTROLLER(bus)) | |
1917 | panic(PFX "no sysdata on bus %d!\n", bus->number); | |
1918 | ||
1919 | if (PCI_CONTROLLER(bus)->iommu) | |
1920 | return; | |
1921 | ||
1922 | handle = PCI_CONTROLLER(bus)->acpi_handle; | |
1923 | if (!handle) | |
1924 | return; | |
1925 | ||
1926 | /* | |
1927 | * The IOC scope encloses PCI root bridges in the ACPI | |
1928 | * namespace, so work our way out until we find an IOC we | |
1929 | * claimed previously. | |
1930 | */ | |
1931 | do { | |
1932 | for (ioc = ioc_list; ioc; ioc = ioc->next) | |
1933 | if (ioc->handle == handle) { | |
1934 | PCI_CONTROLLER(bus)->iommu = ioc; | |
1935 | return; | |
1936 | } | |
1937 | ||
1938 | status = acpi_get_parent(handle, &parent); | |
1939 | handle = parent; | |
1940 | } while (ACPI_SUCCESS(status)); | |
1941 | ||
1942 | printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number); | |
1943 | } | |
1944 | ||
1945 | #ifdef CONFIG_NUMA | |
1946 | static void __init | |
1947 | sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle) | |
1948 | { | |
1da177e4 | 1949 | unsigned int node; |
bb0fc085 | 1950 | int pxm; |
1da177e4 LT |
1951 | |
1952 | ioc->node = MAX_NUMNODES; | |
1953 | ||
bb0fc085 | 1954 | pxm = acpi_get_pxm(handle); |
1da177e4 | 1955 | |
bb0fc085 | 1956 | if (pxm < 0) |
1da177e4 LT |
1957 | return; |
1958 | ||
762834e8 | 1959 | node = pxm_to_node(pxm); |
1da177e4 LT |
1960 | |
1961 | if (node >= MAX_NUMNODES || !node_online(node)) | |
1962 | return; | |
1963 | ||
1964 | ioc->node = node; | |
1965 | return; | |
1966 | } | |
1967 | #else | |
1968 | #define sba_map_ioc_to_node(ioc, handle) | |
1969 | #endif | |
1970 | ||
1971 | static int __init | |
1972 | acpi_sba_ioc_add(struct acpi_device *device) | |
1973 | { | |
1974 | struct ioc *ioc; | |
1975 | acpi_status status; | |
1976 | u64 hpa, length; | |
1977 | struct acpi_buffer buffer; | |
1978 | struct acpi_device_info *dev_info; | |
1979 | ||
1980 | status = hp_acpi_csr_space(device->handle, &hpa, &length); | |
1981 | if (ACPI_FAILURE(status)) | |
1982 | return 1; | |
1983 | ||
1984 | buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; | |
1985 | status = acpi_get_object_info(device->handle, &buffer); | |
1986 | if (ACPI_FAILURE(status)) | |
1987 | return 1; | |
1988 | dev_info = buffer.pointer; | |
1989 | ||
1990 | /* | |
1991 | * For HWP0001, only the SBA appears in the ACPI namespace. It encloses the PCI | |
1992 | * root bridges, and its CSR space includes the IOC function. | |
1993 | */ | |
1994 | if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) { | |
1995 | hpa += ZX1_IOC_OFFSET; | |
1996 | /* zx1 based systems default to kernel page size iommu pages */ | |
1997 | if (!iovp_shift) | |
1998 | iovp_shift = min(PAGE_SHIFT, 16); | |
1999 | } | |
144c87b4 | 2000 | kfree(dev_info); |
1da177e4 LT |
2001 | |
2002 | /* | |
2003 | * default anything not caught above or specified on cmdline to 4k | |
2004 | * iommu page size | |
2005 | */ | |
2006 | if (!iovp_shift) | |
2007 | iovp_shift = 12; | |
2008 | ||
2009 | ioc = ioc_init(hpa, device->handle); | |
2010 | if (!ioc) | |
2011 | return 1; | |
2012 | ||
2013 | /* setup NUMA node association */ | |
2014 | sba_map_ioc_to_node(ioc, device->handle); | |
2015 | return 0; | |
2016 | } | |
2017 | ||
7091138f TR |
2018 | static const struct acpi_device_id hp_ioc_iommu_device_ids[] = { |
2019 | {"HWP0001", 0}, | |
2020 | {"HWP0004", 0}, | |
2021 | {"", 0}, | |
2022 | }; | |
1da177e4 LT |
2023 | static struct acpi_driver acpi_sba_ioc_driver = { |
2024 | .name = "IOC IOMMU Driver", | |
7091138f | 2025 | .ids = hp_ioc_iommu_device_ids, |
1da177e4 LT |
2026 | .ops = { |
2027 | .add = acpi_sba_ioc_add, | |
2028 | }, | |
2029 | }; | |
2030 | ||
2031 | static int __init | |
2032 | sba_init(void) | |
2033 | { | |
0b9afede AW |
2034 | if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb")) |
2035 | return 0; | |
2036 | ||
51b58e3e TL |
2037 | #if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP) |
2038 | /* If we are booting a kdump kernel, the sba_iommu will | |
2039 | * cause devices that were not shut down properly to MCA | |
2040 | * as soon as they are turned back on. Our only option for | |
2041 | * a successful kdump kernel boot is to use the swiotlb. | |
2042 | */ | |
2043 | if (elfcorehdr_addr < ELFCORE_ADDR_MAX) { | |
2044 | if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) | |
2045 | panic("Unable to initialize software I/O TLB:" | |
2046 | " Try machvec=dig boot option"); | |
2047 | machvec_init("dig"); | |
2048 | return 0; | |
2049 | } | |
2050 | #endif | |
2051 | ||
1da177e4 | 2052 | acpi_bus_register_driver(&acpi_sba_ioc_driver); |
0b9afede AW |
2053 | if (!ioc_list) { |
2054 | #ifdef CONFIG_IA64_GENERIC | |
0b9afede AW |
2055 | /* |
2056 | * If we didn't find something sba_iommu can claim, we | |
2057 | * need to set up the swiotlb and switch to the dig machvec. | |
2058 | */ | |
2059 | if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0) | |
2060 | panic("Unable to find SBA IOMMU or initialize " | |
2061 | "software I/O TLB: Try machvec=dig boot option"); | |
2062 | machvec_init("dig"); | |
2063 | #else | |
2064 | panic("Unable to find SBA IOMMU: Try a generic or DIG kernel"); | |
2065 | #endif | |
1da177e4 | 2066 | return 0; |
0b9afede AW |
2067 | } |
2068 | ||
2069 | #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB) | |
2070 | /* | |
2071 | * hpzx1_swiotlb needs to have a fairly small swiotlb bounce | |
2072 | * buffer set up to support devices with smaller DMA masks than | |
2073 | * sba_iommu can handle. | |
2074 | */ | |
2075 | if (ia64_platform_is("hpzx1_swiotlb")) { | |
2076 | extern void hwsw_init(void); | |
2077 | ||
2078 | hwsw_init(); | |
2079 | } | |
2080 | #endif | |
1da177e4 LT |
2081 | |
2082 | #ifdef CONFIG_PCI | |
2083 | { | |
2084 | struct pci_bus *b = NULL; | |
2085 | while ((b = pci_find_next_bus(b)) != NULL) | |
2086 | sba_connect_bus(b); | |
2087 | } | |
2088 | #endif | |
2089 | ||
2090 | #ifdef CONFIG_PROC_FS | |
2091 | ioc_proc_init(); | |
2092 | #endif | |
2093 | return 0; | |
2094 | } | |
2095 | ||
2096 | subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */ | |
2097 | ||
1da177e4 LT |
2098 | static int __init |
2099 | nosbagart(char *str) | |
2100 | { | |
2101 | reserve_sba_gart = 0; | |
2102 | return 1; | |
2103 | } | |
2104 | ||
2105 | int | |
2106 | sba_dma_supported (struct device *dev, u64 mask) | |
2107 | { | |
2108 | /* make sure it's at least 32bit capable */ | |
2109 | return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL); | |
2110 | } | |
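/*
** Illustrative only: a 24-bit mask such as 0x00FFFFFF fails this test,
** while 0xFFFFFFFF or any wider 64-bit mask covers the low 32 bits and is
** accepted.
*/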
2111 | ||
2112 | int | |
2113 | sba_dma_mapping_error (dma_addr_t dma_addr) | |
2114 | { | |
2115 | return 0; | |
2116 | } | |
2117 | ||
2118 | __setup("nosbagart", nosbagart); | |
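/*
** Illustrative: booting with "nosbagart" on the kernel command line clears
** reserve_sba_gart, so ioc_iova_init() will not set aside half of the IOVA
** space for agpgart even if an AGP device is present.
*/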
2119 | ||
2120 | static int __init | |
2121 | sba_page_override(char *str) | |
2122 | { | |
2123 | unsigned long page_size; | |
2124 | ||
2125 | page_size = memparse(str, &str); | |
2126 | switch (page_size) { | |
2127 | case 4096: | |
2128 | case 8192: | |
2129 | case 16384: | |
2130 | case 65536: | |
2131 | iovp_shift = ffs(page_size) - 1; | |
2132 | break; | |
2133 | default: | |
2134 | printk(KERN_ERR "%s: unknown/unsupported iommu page size %ld\n", | |
2135 | __FUNCTION__, page_size); | |
2136 | } | |
2137 | ||
2138 | return 1; | |
2139 | } | |
2140 | ||
2141 | __setup("sbapagesize=", sba_page_override); | |
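/*
** Illustrative: "sbapagesize=64k" on the kernel command line selects 64K
** IOTLB pages (memparse() accepts the k/K suffix); values other than
** 4k/8k/16k/64k are reported by sba_page_override() and left unchanged.
*/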
2142 | ||
2143 | EXPORT_SYMBOL(sba_dma_mapping_error); | |
2144 | EXPORT_SYMBOL(sba_map_single); | |
2145 | EXPORT_SYMBOL(sba_unmap_single); | |
2146 | EXPORT_SYMBOL(sba_map_sg); | |
2147 | EXPORT_SYMBOL(sba_unmap_sg); | |
2148 | EXPORT_SYMBOL(sba_dma_supported); | |
2149 | EXPORT_SYMBOL(sba_alloc_coherent); | |
2150 | EXPORT_SYMBOL(sba_free_coherent); |