/*
** ccio-dma.c:
**	DMA management routines for first generation cache-coherent machines.
**	Program U2/Uturn in "Virtual Mode" and use the I/O MMU.
**
**	(c) Copyright 2000 Grant Grundler
**	(c) Copyright 2000 Ryan Bradetich
**	(c) Copyright 2000 Hewlett-Packard Company
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License as published by
** the Free Software Foundation; either version 2 of the License, or
** (at your option) any later version.
**
**
**  "Real Mode" operation refers to U2/Uturn chip operation.
**  U2/Uturn were designed to perform coherency checks w/o using
**  the I/O MMU - basically what x86 does.
**
**  Philipp Rumpf has a "Real Mode" driver for PCX-W machines at:
**      CVSROOT=:pserver:anonymous@198.186.203.37:/cvsroot/linux-parisc
**      cvs -z3 co linux/arch/parisc/kernel/dma-rm.c
**
**  I've rewritten his code to work under TPG's tree. See ccio-rm-dma.c.
**
**  Drawbacks of using Real Mode are:
**	o outbound DMA is slower - U2 won't prefetch data (GSC+ XQL signal).
**	o Inbound DMA less efficient - U2 can't use DMA_FAST attribute.
**	o Ability to do scatter/gather in HW is lost.
**	o Doesn't work under PCX-U/U+ machines since they didn't follow
**	  the coherency design originally worked out. Only PCX-W does.
*/

#include <linux/types.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/byteorder.h>
#include <asm/cache.h>		/* for L1_CACHE_BYTES */
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/hardware.h>	/* for register_module() */
#include <asm/parisc-device.h>

/*
** Choose "ccio" since that's what HP-UX calls it.
** Make it easier for folks to migrate from one to the other :^)
*/
#define MODULE_NAME "ccio"

#undef DEBUG_CCIO_RES
#undef DEBUG_CCIO_RUN
#undef DEBUG_CCIO_INIT
#undef DEBUG_CCIO_RUN_SG

#ifdef CONFIG_PROC_FS
/*
 * CCIO_SEARCH_TIME can help measure how fast the bitmap search is.
 * It impacts performance though - ditch it if you don't use it.
 */
#define CCIO_SEARCH_TIME
#undef CCIO_MAP_STATS
#else
#undef CCIO_SEARCH_TIME
#undef CCIO_MAP_STATS
#endif

#include <asm/runway.h>		/* for proc_runway_root */

#ifdef DEBUG_CCIO_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_CCIO_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_CCIO_RES
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_CCIO_RUN_SG
#define DBG_RUN_SG(x...) printk(x)
#else
#define DBG_RUN_SG(x...)
#endif

#define CCIO_INLINE	inline
#define WRITE_U32(value, addr) __raw_writel(value, addr)
#define READ_U32(addr) __raw_readl(addr)

#define U2_IOA_RUNWAY 0x580
#define U2_BC_GSC     0x501
#define UTURN_IOA_RUNWAY 0x581
#define UTURN_BC_GSC     0x502

#define IOA_NORMAL_MODE      0x00020080 /* IO_CONTROL to turn on CCIO        */
#define CMD_TLB_DIRECT_WRITE 35         /* IO_COMMAND for I/O TLB Writes     */
#define CMD_TLB_PURGE        33         /* IO_COMMAND to Purge I/O TLB entry */

struct ioa_registers {
	/* Runway Supervisory Set */
	int32_t  unused1[12];
	uint32_t io_command;		/* Offset 12 */
	uint32_t io_status;		/* Offset 13 */
	uint32_t io_control;		/* Offset 14 */
	int32_t  unused2[1];

	/* Runway Auxiliary Register Set */
	uint32_t io_err_resp;		/* Offset  0 */
	uint32_t io_err_info;		/* Offset  1 */
	uint32_t io_err_req;		/* Offset  2 */
	uint32_t io_err_resp_hi;	/* Offset  3 */
	uint32_t io_tlb_entry_m;	/* Offset  4 */
	uint32_t io_tlb_entry_l;	/* Offset  5 */
	uint32_t unused3[1];
	uint32_t io_pdir_base;		/* Offset  7 */
	uint32_t io_io_low_hv;		/* Offset  8 */
	uint32_t io_io_high_hv;		/* Offset  9 */
	uint32_t unused4[1];
	uint32_t io_chain_id_mask;	/* Offset 11 */
	uint32_t unused5[2];
	uint32_t io_io_low;		/* Offset 14 */
	uint32_t io_io_high;		/* Offset 15 */
};

/*
** IOA Registers
** -------------
**
** Runway IO_CONTROL Register (+0x38)
**
** The Runway IO_CONTROL register controls the forwarding of transactions.
**
** | 0 ... 13 | 14 15 | 16 ... 21 | 22 | 23 24 | 25 ... 31 |
** |    HV    |  TLB  | reserved  | HV | mode  |  reserved |
**
** o mode field indicates the address translation of transactions
**   forwarded from Runway to GSC+:
**
**	Mode Name	Value	Definition
**	Off (default)	  0	Opaque to matching addresses.
**	Include		  1	Transparent for matching addresses.
**	Peek		  3	Map matching addresses.
**
**	+ "Off" mode: Runway transactions which match the I/O range
**	  specified by the IO_IO_LOW/IO_IO_HIGH registers will be ignored.
**	+ "Include" mode: all addresses within the I/O range specified
**	  by the IO_IO_LOW and IO_IO_HIGH registers are transparently
**	  forwarded. This is the I/O Adapter's normal operating mode.
**	+ "Peek" mode: used during system configuration to initialize the
**	  GSC+ bus. Runway Write_Shorts in the address range specified by
**	  IO_IO_LOW and IO_IO_HIGH are forwarded through the I/O Adapter
**	  *AND* the GSC+ address is remapped to the Broadcast Physical
**	  Address space by setting the 14 high order address bits of the
**	  32 bit GSC+ address to ones.
**
** o TLB field affects transactions which are forwarded from GSC+ to Runway.
**   "Real" mode is the poweron default.
**
**	TLB Mode  Value  Description
**	Real	    0	 No TLB translation. Address is directly mapped and the
**			 virtual address is composed of selected physical bits.
**	Error	    1	 Software fills the TLB manually.
**	Normal	    2	 IOA fetches IO TLB misses from IO PDIR (in host memory).
**
**
** IO_IO_LOW_HV	  +0x60 (HV dependent)
** IO_IO_HIGH_HV  +0x64 (HV dependent)
** IO_IO_LOW	  +0x78 (Architected register)
** IO_IO_HIGH	  +0x7c (Architected register)
**
** IO_IO_LOW and IO_IO_HIGH set the lower and upper bounds of the
** I/O Adapter address space, respectively.
**
** 0 ... 7 | 8 ... 15 | 16 ... 31 |
** 11111111 | 11111111 |  address |
**
** Each LOW/HIGH pair describes a disjoint address space region.
** (2 per GSC+ port). Each incoming Runway transaction address is compared
** with both sets of LOW/HIGH registers. If the address is in the range
** greater than or equal to IO_IO_LOW and less than IO_IO_HIGH, the
** transaction is forwarded to the respective GSC+ bus.
** Specify IO_IO_LOW equal to or greater than IO_IO_HIGH to avoid specifying
** an address space region.
**
** In order for a Runway address to reside within GSC+ extended address space:
**	Runway Address [0:7]	must identically compare to 8'b11111111
**	Runway Address [8:11]	must be equal to IO_IO_LOW(_HV)[16:19]
**	Runway Address [12:23]	must be greater than or equal to
**		   IO_IO_LOW(_HV)[20:31] and less than IO_IO_HIGH(_HV)[20:31].
**	Runway Address [24:39]	is not used in the comparison.
**
** When the Runway transaction is forwarded to GSC+, the GSC+ address is
** as follows:
**	GSC+ Address[0:3]	4'b1111
**	GSC+ Address[4:29]	Runway Address[12:37]
**	GSC+ Address[30:31]	2'b00
**
** All 4 Low/High registers must be initialized (by PDC) once the lower bus
** is interrogated and address space is defined. The operating system will
** modify the architectural IO_IO_LOW and IO_IO_HIGH registers following
** the PDC initialization. However, the hardware version dependent IO_IO_LOW
** and IO_IO_HIGH registers should not be subsequently altered by the OS.
**
** Writes to both sets of registers will take effect immediately, bypassing
** the queues, which ensures that subsequent Runway transactions are checked
** against the updated bounds values. However, reads are queued, introducing
** the possibility of a read being bypassed by a subsequent write to the same
** register. This sequence can be avoided by having software wait for read
** returns before issuing subsequent writes.
*/
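
/*
** Illustrative sketch only (not compiled into the driver): the range
** check and address translation described above, redone with LSB-first
** C shifts. PA-RISC documentation numbers bits from the MSB, so e.g.
** "Runway Address [12:23]" of a 40-bit address is (addr >> 16) & 0xfff.
** The helper below is an assumption for illustration; nothing in this
** file uses it.
*/
#if 0
static int runway_to_gsc(u64 rw_addr, u32 io_io_low, u32 io_io_high,
			 u32 *gsc_addr)
{
	u32 top8   = (rw_addr >> 32) & 0xff;	/* Runway Address [0:7]   */
	u32 nibble = (rw_addr >> 28) & 0xf;	/* Runway Address [8:11]  */
	u32 page   = (rw_addr >> 16) & 0xfff;	/* Runway Address [12:23] */

	if (top8 != 0xff ||
	    nibble != ((io_io_low >> 12) & 0xf) ||	/* IO_IO_LOW[16:19]  */
	    page < (io_io_low & 0xfff) ||		/* IO_IO_LOW[20:31]  */
	    page >= (io_io_high & 0xfff))		/* IO_IO_HIGH[20:31] */
		return 0;	/* not forwarded to this GSC+ port */

	/* GSC+[0:3] = 4'b1111, GSC+[4:29] = Runway[12:37], GSC+[30:31] = 0 */
	*gsc_addr = 0xf0000000 | ((u32)rw_addr & 0x0ffffffc);
	return 1;
}
#endif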

struct ioc {
	struct ioa_registers __iomem *ioc_regs;	/* I/O MMU base address */
	u8  *res_map;			/* resource map, bit == pdir entry */
	u64 *pdir_base;			/* physical base address */
	u32 pdir_size;			/* bytes, function of IOV Space size */
	u32 res_hint;			/* next available IOVP -
					   circular search */
	u32 res_size;			/* size of resource map in bytes */
	spinlock_t res_lock;

#ifdef CCIO_SEARCH_TIME
#define CCIO_SEARCH_SAMPLE 0x100
	unsigned long avg_search[CCIO_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
#endif
#ifdef CCIO_MAP_STATS
	unsigned long used_pages;
	unsigned long msingle_calls;
	unsigned long msingle_pages;
	unsigned long msg_calls;
	unsigned long msg_pages;
	unsigned long usingle_calls;
	unsigned long usingle_pages;
	unsigned long usg_calls;
	unsigned long usg_pages;
#endif
	unsigned short cujo20_bug;

	/* STUFF We don't need in performance path */
	u32 chainid_shift;		/* specify bit location of chain_id */
	struct ioc *next;		/* Linked list of discovered iocs */
	const char *name;		/* device name from firmware */
	unsigned int hw_path;		/* the hardware path this ioc is associated with */
	struct pci_dev *fake_pci_dev;	/* the fake pci_dev for non-pci devs */
	struct resource mmio_region[2];	/* The "routed" MMIO regions */
};

static struct ioc *ioc_list;
static int ioc_count;

/**************************************************************
*
* I/O Pdir Resource Management
*
* Bits set in the resource map are in use.
* Each bit can represent a number of pages.
* LSbs represent lower addresses (IOVA's).
*
* This was copied from sba_iommu.c. Don't try to unify the two
* resource managers unless you also provide a way to have different
* allocation policies. We'd like to avoid I/O TLB thrashing by
* having resource allocation policy match the I/O TLB replacement
* policy.
*
***************************************************************/
#define IOVP_SIZE PAGE_SIZE
#define IOVP_SHIFT PAGE_SHIFT
#define IOVP_MASK PAGE_MASK

/* Convert from IOVP to IOVA and vice versa. */
#define CCIO_IOVA(iovp,offset) ((iovp) | (offset))
#define CCIO_IOVP(iova) ((iova) & IOVP_MASK)

#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
#define MKIOVP(pdir_idx) ((long)(pdir_idx) << IOVP_SHIFT)
#define MKIOVA(iovp,offset) (dma_addr_t)((long)iovp | (long)offset)
#define ROUNDUP(x,y) (((x) + ((y)-1)) & ~((y)-1))
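
/*
** Worked example (illustrative only, assuming 4k pages) of how the
** macros above split and rebuild a DMA address:
**
**	iova = 0x00123abc
**	CCIO_IOVP(iova)			== 0x00123000	(I/O virtual page)
**	iova & ~IOVP_MASK		== 0x00000abc	(offset within page)
**	PDIR_INDEX(CCIO_IOVP(iova))	== 0x123	(I/O PDIR slot)
**	CCIO_IOVA(MKIOVP(0x123), 0xabc)	== iova		(lossless round trip)
*/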

/*
** Don't worry about the 150% average search length on a miss.
** If the search wraps around, and passes the res_hint, it will
** cause the kernel to panic anyhow.
*/
#define CCIO_SEARCH_LOOP(ioc, res_idx, mask, size)  \
	for(; res_ptr < res_end; ++res_ptr) { \
		if(0 == (*res_ptr & mask)) { \
			*res_ptr |= mask; \
			res_idx = (unsigned int)((unsigned long)res_ptr - (unsigned long)ioc->res_map); \
			ioc->res_hint = res_idx + (size >> 3); \
			goto resource_found; \
		} \
	}

#define CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, size)  \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[(ioc)->res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&(ioc)->res_map[(ioc)->res_size]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size); \
	res_ptr = (u##size *)&(ioc)->res_map[0]; \
	CCIO_SEARCH_LOOP(ioc, res_idx, mask, size);
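
/*
** Roughly what CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8) expands
** to (illustrative sketch only, not compiled): scan bytes from
** res_hint to the end of the map, then wrap to the start, claiming
** the first byte whose 8 bits (8 pages) are all clear.
*/
#if 0
	u8 *res_ptr = (u8 *)&ioc->res_map[ioc->res_hint];
	u8 *res_end = (u8 *)&ioc->res_map[ioc->res_size];

	for (; res_ptr < res_end; ++res_ptr) {
		if (0 == (*res_ptr & 0xff)) {
			*res_ptr |= 0xff;	/* claim all 8 pages */
			res_idx = (unsigned int)((unsigned long)res_ptr -
						 (unsigned long)ioc->res_map);
			ioc->res_hint = res_idx + 1;	/* size >> 3 == 1 */
			goto resource_found;
		}
	}
	/* ...and once more from res_ptr = &ioc->res_map[0]. */
#endif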

/*
** Find available bit in this ioa's resource map.
** Use a "circular" search:
**	o Most IOVA's are "temporary" - avg search time should be small.
**	o keep a history of what happened for debugging
**	o KISS.
**
** Perf optimizations:
**	o search for log2(size) bits at a time.
**	o search for available resource bits using byte/word/whatever.
**	o use different search for "large" (eg > 4 pages) or "very large"
**	  (eg > 16 pages) mappings.
*/

/**
 * ccio_alloc_range - Allocate pages in the ioc's resource map.
 * @ioc: The I/O Controller.
 * @size: The size of the DMA region, which determines the number of
 * pages to be mapped into the I/O Pdir...
 *
 * This function searches the resource map of the ioc to locate a range
 * of available pages for the requested size.
 */
static int
ccio_alloc_range(struct ioc *ioc, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
	unsigned int res_idx;
#ifdef CCIO_SEARCH_TIME
	unsigned long cr_start = mfctl(16);
#endif

	BUG_ON(pages_needed == 0);
	BUG_ON((pages_needed * IOVP_SIZE) > DMA_CHUNK_SIZE);

	DBG_RES("%s() size: %d pages_needed %d\n",
		__FUNCTION__, size, pages_needed);

	/*
	** "seek and ye shall find"...praying never hurts either...
	** ggg sacrifices another 710 to the computer gods.
	*/

	if (pages_needed <= 8) {
		/*
		 * LAN traffic will not thrash the TLB IFF the same NIC
		 * uses 8 adjacent pages to map separate payload data.
		 * ie the same byte in the resource bit map.
		 */
#if 0
		/* FIXME: bit search should shift its way through
		 * an unsigned long - not byte at a time. As it is now,
		 * we effectively allocate this byte to this mapping.
		 */
		unsigned long mask = ~(~0UL >> pages_needed);
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, mask, 8);
#else
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xff, 8);
#endif
	} else if (pages_needed <= 16) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, 0xffff, 16);
	} else if (pages_needed <= 32) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if (pages_needed <= 64) {
		CCIO_FIND_FREE_MAPPING(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s: %s() Too many pages to map. pages_needed: %u\n",
		      __FILE__, __FUNCTION__, pages_needed);
	}

	panic("%s: %s() I/O MMU is out of mapping resources.\n", __FILE__,
	      __FUNCTION__);

resource_found:

	DBG_RES("%s() res_idx %d res_hint: %d\n",
		__FUNCTION__, res_idx, ioc->res_hint);

#ifdef CCIO_SEARCH_TIME
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= CCIO_SEARCH_SAMPLE - 1;
#endif
#ifdef CCIO_MAP_STATS
	ioc->used_pages += pages_needed;
#endif
	/*
	** return the bit address.
	*/
	return res_idx << 3;
}

#define CCIO_FREE_MAPPINGS(ioc, res_idx, mask, size) \
	u##size *res_ptr = (u##size *)&((ioc)->res_map[res_idx]); \
	BUG_ON((*res_ptr & mask) != mask); \
	*res_ptr &= ~(mask);

/**
 * ccio_free_range - Free pages from the ioc's resource map.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @pages_mapped: The requested number of pages to be freed from the
 * I/O Pdir.
 *
 * This function frees the resources allocated for the iova.
 */
static void
ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)
{
	unsigned long iovp = CCIO_IOVP(iova);
	unsigned int res_idx = PDIR_INDEX(iovp) >> 3;

	BUG_ON(pages_mapped == 0);
	BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
	BUG_ON(pages_mapped > BITS_PER_LONG);

	DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
		__FUNCTION__, res_idx, pages_mapped);

#ifdef CCIO_MAP_STATS
	ioc->used_pages -= pages_mapped;
#endif

	if(pages_mapped <= 8) {
#if 0
		/* see matching comments in alloc_range */
		unsigned long mask = ~(~0UL >> pages_mapped);
		CCIO_FREE_MAPPINGS(ioc, res_idx, mask, 8);
#else
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xff, 8);
#endif
	} else if(pages_mapped <= 16) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, 0xffff, 16);
	} else if(pages_mapped <= 32) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~(unsigned int)0, 32);
#ifdef __LP64__
	} else if(pages_mapped <= 64) {
		CCIO_FREE_MAPPINGS(ioc, res_idx, ~0UL, 64);
#endif
	} else {
		panic("%s:%s() Too many pages to unmap.\n", __FILE__,
		      __FUNCTION__);
	}
}

/****************************************************************
**
**          CCIO dma_ops support routines
**
*****************************************************************/

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/*
** DMA "Page Type" and Hints
** o if SAFE_DMA isn't set, mapping is for FAST_DMA. SAFE_DMA should be
**   set for subcacheline DMA transfers since we don't want to damage the
**   other part of a cacheline.
** o SAFE_DMA must be set for "memory" allocated via pci_alloc_consistent().
**   This bit tells U2 to do R/M/W for partial cachelines. "Streaming"
**   data can avoid this if the mapping covers full cache lines.
** o STOP_MOST is needed for atomicity across cachelines.
**   Apparently only "some EISA devices" need this.
**   Using CONFIG_EISA is a hack. Only the IOA with EISA under it needs
**   to use this hint iff the EISA device needs this feature.
**   According to the U2 ERS, STOP_MOST enabled pages hurt performance.
** o PREFETCH should *not* be set for cases like Multiple PCI devices
**   behind GSCtoPCI (dino) bus converter. Only one cacheline per GSC
**   device can be fetched and multiple DMA streams will thrash the
**   prefetch buffer and burn memory bandwidth. See 6.7.3 "Prefetch Rules
**   and Invalidation of Prefetch Entries".
**
** FIXME: the default hints need to be per GSC device - not global.
**
** HP-UX dorks: linux device driver programming model is totally different
**    from HP-UX's. HP-UX always sets HINT_PREFETCH since its drivers
**    do special things to work on non-coherent platforms...linux has to
**    be much more careful with this.
*/
#define IOPDIR_VALID    0x01UL
#define HINT_SAFE_DMA   0x02UL	/* used for pci_alloc_consistent() pages */
#ifdef CONFIG_EISA
#define HINT_STOP_MOST  0x04UL	/* LSL support */
#else
#define HINT_STOP_MOST  0x00UL	/* only needed for "some EISA devices" */
#endif
#define HINT_UDPATE_ENB 0x08UL	/* not used/supported by U2 */
#define HINT_PREFETCH   0x10UL	/* for outbound pages which are not SAFE */


/*
** Use direction (ie PCI_DMA_TODEVICE) to pick hint.
** ccio_alloc_consistent() depends on this to get SAFE_DMA
** when it passes in BIDIRECTIONAL flag.
*/
static u32 hint_lookup[] = {
	[PCI_DMA_BIDIRECTIONAL]	= HINT_STOP_MOST | HINT_SAFE_DMA | IOPDIR_VALID,
	[PCI_DMA_TODEVICE]	= HINT_STOP_MOST | HINT_PREFETCH | IOPDIR_VALID,
	[PCI_DMA_FROMDEVICE]	= HINT_STOP_MOST | IOPDIR_VALID,
};

/**
 * ccio_io_pdir_entry - Initialize an I/O Pdir.
 * @pdir_ptr: A pointer into I/O Pdir.
 * @sid: The Space Identifier.
 * @vba: The virtual address.
 * @hints: The DMA Hint.
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1),
 * load the I/O PDIR entry pointed to by pdir_ptr (arg0). Each IO Pdir
 * entry consists of 8 bytes as shown below (MSB == bit 0):
 *
 *
 * WORD 0:
 * +------+----------------+-----------------------------------------------+
 * | Phys | Virtual Index  |               Phys                            |
 * | 0:3  |     0:11       |               4:19                            |
 * |4 bits|   12 bits      |              16 bits                          |
 * +------+----------------+-----------------------------------------------+
 * WORD 1:
 * +-----------------------+-----------------------------------------------+
 * |      Phys     |  Rsvd | Prefetch |Update |Rsvd  |Lock  |Safe  |Valid  |
 * |     20:39     |       | Enable   |Enable |      |Enable|DMA   |       |
 * |    20 bits    | 5 bits| 1 bit    |1 bit  |2 bits|1 bit |1 bit |1 bit  |
 * +-----------------------+-----------------------------------------------+
 *
 * The virtual index field is filled with the results of the LCI
 * (Load Coherence Index) instruction. The 8 bits used for the virtual
 * index are bits 12:19 of the value returned by LCI.
 */
void CCIO_INLINE
ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
		   unsigned long hints)
{
	register unsigned long pa;
	register unsigned long ci; /* coherent index */

	/* We currently only support kernel addresses */
	BUG_ON(sid != KERNEL_SPACE);

	mtsp(sid,1);

	/*
	** WORD 1 - low order word
	** "hints" parm includes the VALID bit!
	** "dep" clobbers the physical address offset bits as well.
	*/
	pa = virt_to_phys(vba);
	asm volatile("depw %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

	/*
	** WORD 0 - high order word
	*/

#ifdef __LP64__
	/*
	** get bits 12:15 of physical address
	** shift bits 16:31 of physical address
	** and deposit them
	*/
	asm volatile ("extrd,u %1,15,4,%0" : "=r" (ci) : "r" (pa));
	asm volatile ("extrd,u %1,31,16,%0" : "+r" (pa) : "r" (pa));
	asm volatile ("depd %1,35,4,%0" : "+r" (pa) : "r" (ci));
#else
	pa = 0;
#endif
	/*
	** get CPU coherency index bits
	** Grab virtual index [0:11]
	** Deposit virt_idx bits into I/O PDIR word
	*/
	asm volatile ("lci %%r0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
	asm volatile ("extru %1,19,12,%0" : "+r" (ci) : "r" (ci));
	asm volatile ("depw %1,15,12,%0" : "+r" (pa) : "r" (ci));

	((u32 *)pdir_ptr)[0] = (u32) pa;


	/* FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
	**        PCX-U/U+ do. (eg C200/C240)
	**        PCX-T'? Don't know. (eg C110 or similar K-class)
	**
	** See PDC_MODEL/option 0/SW_CAP word for "Non-coherent IO-PDIR bit".
	** Hopefully we can patch (NOP) these out at boot time somehow.
	**
	** "Since PCX-U employs an offset hash that is incompatible with
	** the real mode coherence index generation of U2, the PDIR entry
	** must be flushed to memory to retain coherence."
	*/
	asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
	asm volatile("sync");
}

/**
 * ccio_clear_io_tlb - Remove stale entries from the I/O TLB.
 * @ioc: The I/O Controller.
 * @iovp: The I/O Virtual Page.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Purge invalid I/O PDIR entries from the I/O TLB.
 *
 * FIXME: Can we change the byte_cnt to pages_mapped?
 */
static CCIO_INLINE void
ccio_clear_io_tlb(struct ioc *ioc, dma_addr_t iovp, size_t byte_cnt)
{
	u32 chain_size = 1 << ioc->chainid_shift;

	iovp &= IOVP_MASK;	/* clear offset bits, just want pagenum */
	byte_cnt += chain_size;

	while(byte_cnt > chain_size) {
		WRITE_U32(CMD_TLB_PURGE | iovp, &ioc->ioc_regs->io_command);
		iovp += chain_size;
		byte_cnt -= chain_size;
	}
}

/**
 * ccio_mark_invalid - Mark the I/O Pdir entries invalid.
 * @ioc: The I/O Controller.
 * @iova: The I/O Virtual Address.
 * @byte_cnt: The requested number of bytes to be freed from the I/O Pdir.
 *
 * Mark the I/O Pdir entries invalid and blow away the corresponding I/O
 * TLB entries.
 *
 * FIXME: at some threshold it might be "cheaper" to just blow
 * away the entire I/O TLB instead of individual entries.
 *
 * FIXME: Uturn has 256 TLB entries. We don't need to purge every
 * PDIR entry - just once for each possible TLB entry.
 * (We do need to make I/O PDIR entries invalid regardless).
 *
 * FIXME: Can we change byte_cnt to pages_mapped?
 */
static CCIO_INLINE void
ccio_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32)CCIO_IOVP(iova);
	size_t saved_byte_cnt;

	/* round up to nearest page size */
	saved_byte_cnt = byte_cnt = ROUNDUP(byte_cnt, IOVP_SIZE);

	while(byte_cnt > 0) {
		/* invalidate one page at a time */
		unsigned int idx = PDIR_INDEX(iovp);
		char *pdir_ptr = (char *) &(ioc->pdir_base[idx]);

		BUG_ON(idx >= (ioc->pdir_size / sizeof(u64)));
		pdir_ptr[7] = 0;	/* clear only VALID bit */
		/*
		** FIXME: PCX_W platforms don't need FDC/SYNC. (eg C360)
		**   PCX-U/U+ do. (eg C200/C240)
		** See PDC_MODEL/option 0/SW_CAP for "Non-coherent IO-PDIR bit".
		**
		** Hopefully someone figures out how to patch (NOP) the
		** FDC/SYNC out at boot time.
		*/
		asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));

		iovp += IOVP_SIZE;
		byte_cnt -= IOVP_SIZE;
	}

	asm volatile("sync");
	ccio_clear_io_tlb(ioc, CCIO_IOVP(iova), saved_byte_cnt);
}

/****************************************************************
**
**          CCIO dma_ops
**
*****************************************************************/

/**
 * ccio_dma_supported - Verify the IOMMU supports the DMA address range.
 * @dev: The PCI device.
 * @mask: A bit mask describing the DMA address range of the device.
 *
 * This function implements the pci_dma_supported function.
 */
static int
ccio_dma_supported(struct device *dev, u64 mask)
{
	if(dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return 0;
	}

	/* only support 32-bit devices (ie PCI/GSC) */
	return (int)(mask == 0xffffffffUL);
}

/**
 * ccio_map_single - Map an address range into the IOMMU.
 * @dev: The PCI device.
 * @addr: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_single function.
 */
static dma_addr_t
ccio_map_single(struct device *dev, void *addr, size_t size,
		enum dma_data_direction direction)
{
	int idx;
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	unsigned long hint = hint_lookup[(int)direction];

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	BUG_ON(size <= 0);

	/* save offset bits */
	offset = ((unsigned long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = ROUNDUP(size + offset, IOVP_SIZE);
	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_MAP_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif

	idx = ccio_alloc_range(ioc, size);
	iovp = (dma_addr_t)MKIOVP(idx);

	pdir_start = &(ioc->pdir_base[idx]);

	DBG_RUN("%s() 0x%p -> 0x%lx size: 0x%x\n",
		__FUNCTION__, addr, (long)iovp | offset, size);

	/* If not cacheline aligned, force SAFE_DMA on the whole mess */
	if((size % L1_CACHE_BYTES) || ((unsigned long)addr % L1_CACHE_BYTES))
		hint |= HINT_SAFE_DMA;

	while(size > 0) {
		ccio_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long)addr, hint);

		DBG_RUN(" pdir %p %08x%08x\n",
			pdir_start,
			(u32) (((u32 *) pdir_start)[0]),
			(u32) (((u32 *) pdir_start)[1]));
		++pdir_start;
		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
	}

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return CCIO_IOVA(iovp, offset);
}

/**
 * ccio_unmap_single - Unmap an address range from the IOMMU.
 * @dev: The PCI device.
 * @iova: The start address of the DMA region.
 * @size: The length of the DMA region.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_unmap_single function.
 */
static void
ccio_unmap_single(struct device *dev, dma_addr_t iova, size_t size,
		  enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t offset = iova & ~IOVP_MASK;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN("%s() iovp 0x%lx/%x\n",
		__FUNCTION__, (long)iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ROUNDUP(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_MAP_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	ccio_mark_invalid(ioc, iova, size);
	ccio_free_range(ioc, iova, (size >> IOVP_SHIFT));
	spin_unlock_irqrestore(&ioc->res_lock, flags);
}

/**
 * ccio_alloc_consistent - Allocate a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @dma_handle: The DMA address handed back to the device (not the cpu).
 *
 * This function implements the pci_alloc_consistent function.
 */
static void *
ccio_alloc_consistent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#if 0
/* GRANT Need to establish hierarchy for non-PCI devs as well
** and then provide matching gsc_map_xxx() functions for them as well.
*/
	if(!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return 0;
	}
#endif
	ret = (void *) __get_free_pages(flag, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = ccio_map_single(dev, ret, size, PCI_DMA_BIDIRECTIONAL);
	}

	return ret;
}

/**
 * ccio_free_consistent - Free a consistent DMA mapping.
 * @dev: The PCI device.
 * @size: The length of the DMA region.
 * @cpu_addr: The cpu address returned from the ccio_alloc_consistent.
 * @dma_handle: The device address returned from the ccio_alloc_consistent.
 *
 * This function implements the pci_free_consistent function.
 */
static void
ccio_free_consistent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t dma_handle)
{
	ccio_unmap_single(dev, dma_handle, size, 0);
	free_pages((unsigned long)cpu_addr, get_order(size));
}

/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL
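
/*
** Illustrative sketch only (not compiled): how a pdir index - which
** may legitimately be 0 - stays distinguishable from "no mapping".
** The real tagging happens in iommu-helpers.h; the variable names
** below are assumptions made for this sketch.
*/
#if 0
	sg_dma_address(sg) = PIDE_FLAG | (idx << IOVP_SHIFT);	/* tag   */
	if (sg_dma_address(sg) & PIDE_FLAG)			/* valid */
		iovp = sg_dma_address(sg) & ~PIDE_FLAG;		/* untag */
#endif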

#ifdef CCIO_MAP_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

/**
 * ccio_map_sg - Map the scatter/gather list into the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be mapped in the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_map_sg function.
 */
static int
ccio_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	    enum dma_data_direction direction)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
	unsigned long flags;
	unsigned long hint = hint_lookup[(int)direction];
	unsigned long prev_len = 0, current_len = 0;
	int i;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = ccio_map_single(dev,
				(void *)sg_virt_addr(sglist), sglist->length,
				direction);
		sg_dma_len(sglist) = sglist->length;
		return 1;
	}

	for(i = 0; i < nents; i++)
		prev_len += sglist[i].length;

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef CCIO_MAP_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = iommu_coalesce_chunks(ioc, sglist, nents, ccio_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o page/offset contain the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, hint, ccio_io_pdir_entry);

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	BUG_ON(coalesced != filled);

	DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);

	for (i = 0; i < filled; i++)
		current_len += sg_dma_len(sglist + i);

	BUG_ON(current_len != prev_len);

	return filled;
}

/**
 * ccio_unmap_sg - Unmap the scatter/gather list from the IOMMU.
 * @dev: The PCI device.
 * @sglist: The scatter/gather list to be unmapped from the IOMMU.
 * @nents: The number of entries in the scatter/gather list.
 * @direction: The direction of the DMA transaction (to/from device).
 *
 * This function implements the pci_unmap_sg function.
 */
static void
ccio_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	      enum dma_data_direction direction)
{
	struct ioc *ioc;

	BUG_ON(!dev);
	ioc = GET_IOC(dev);

	DBG_RUN_SG("%s() START %d entries, %08lx,%x\n",
		__FUNCTION__, nents, sg_virt_addr(sglist), sglist->length);

#ifdef CCIO_MAP_STATS
	ioc->usg_calls++;
#endif

	while(sg_dma_len(sglist) && nents--) {

#ifdef CCIO_MAP_STATS
		ioc->usg_pages += sg_dma_len(sglist) >> PAGE_SHIFT;
#endif
		ccio_unmap_single(dev, sg_dma_address(sglist),
				  sg_dma_len(sglist), direction);
		++sglist;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
}

static struct hppa_dma_ops ccio_ops = {
	.dma_supported =	ccio_dma_supported,
	.alloc_consistent =	ccio_alloc_consistent,
	.alloc_noncoherent =	ccio_alloc_consistent,
	.free_consistent =	ccio_free_consistent,
	.map_single =		ccio_map_single,
	.unmap_single =		ccio_unmap_single,
	.map_sg =		ccio_map_sg,
	.unmap_sg =		ccio_unmap_sg,
	.dma_sync_single_for_cpu =	NULL,	/* NOP for U2/Uturn */
	.dma_sync_single_for_device =	NULL,	/* NOP for U2/Uturn */
	.dma_sync_sg_for_cpu =		NULL,	/* ditto */
	.dma_sync_sg_for_device =	NULL,	/* ditto */
};

#ifdef CONFIG_PROC_FS
static int ccio_proc_info(struct seq_file *m, void *p)
{
	int len = 0;
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		unsigned int total_pages = ioc->res_size << 3;
		unsigned long avg = 0, min, max;
		int j;

		len += seq_printf(m, "%s\n", ioc->name);

		len += seq_printf(m, "Cujo 2.0 bug    : %s\n",
				  (ioc->cujo20_bug ? "yes" : "no"));

		len += seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
				  total_pages * 8, total_pages);

#ifdef CCIO_MAP_STATS
		len += seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
				  total_pages - ioc->used_pages, ioc->used_pages,
				  (int)(ioc->used_pages * 100 / total_pages));
#endif

		len += seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
				  ioc->res_size, total_pages);

#ifdef CCIO_SEARCH_TIME
		min = max = ioc->avg_search[0];
		for(j = 0; j < CCIO_SEARCH_SAMPLE; ++j) {
			avg += ioc->avg_search[j];
			if(ioc->avg_search[j] > max)
				max = ioc->avg_search[j];
			if(ioc->avg_search[j] < min)
				min = ioc->avg_search[j];
		}
		avg /= CCIO_SEARCH_SAMPLE;
		len += seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
				  min, avg, max);
#endif
#ifdef CCIO_MAP_STATS
		len += seq_printf(m, "pci_map_single(): %8ld calls  %8ld pages (avg %d/1000)\n",
				  ioc->msingle_calls, ioc->msingle_pages,
				  (int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

		/* KLUGE - unmap_sg calls unmap_single for each mapped page */
		min = ioc->usingle_calls - ioc->usg_calls;
		max = ioc->usingle_pages - ioc->usg_pages;
		len += seq_printf(m, "pci_unmap_single: %8ld calls  %8ld pages (avg %d/1000)\n",
				  min, max, (int)((max * 1000)/min));

		len += seq_printf(m, "pci_map_sg()    : %8ld calls  %8ld pages (avg %d/1000)\n",
				  ioc->msg_calls, ioc->msg_pages,
				  (int)((ioc->msg_pages * 1000)/ioc->msg_calls));

		len += seq_printf(m, "pci_unmap_sg()  : %8ld calls  %8ld pages (avg %d/1000)\n\n\n",
				  ioc->usg_calls, ioc->usg_pages,
				  (int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif	/* CCIO_MAP_STATS */

		ioc = ioc->next;
	}

	return 0;
}

static int ccio_proc_info_open(struct inode *inode, struct file *file)
{
	return single_open(file, &ccio_proc_info, NULL);
}

static struct file_operations ccio_proc_info_fops = {
	.owner = THIS_MODULE,
	.open = ccio_proc_info_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
{
	int len = 0;
	struct ioc *ioc = ioc_list;

	while (ioc != NULL) {
		u32 *res_ptr = (u32 *)ioc->res_map;
		int j;

		for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
			if ((j & 7) == 0)
				len += seq_puts(m, "\n   ");
			len += seq_printf(m, "%08x", *res_ptr);
			res_ptr++;
		}
		len += seq_puts(m, "\n\n");
		ioc = ioc->next;
		break; /* XXX - remove me */
	}

	return 0;
}

static int ccio_proc_bitmap_open(struct inode *inode, struct file *file)
{
	return single_open(file, &ccio_proc_bitmap_info, NULL);
}

static struct file_operations ccio_proc_bitmap_fops = {
	.owner = THIS_MODULE,
	.open = ccio_proc_bitmap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif

/**
 * ccio_find_ioc - Find the ioc in the ioc_list
 * @hw_path: The hardware path of the ioc.
 *
 * This function searches the ioc_list for an ioc that matches
 * the provided hardware path.
 */
static struct ioc * ccio_find_ioc(int hw_path)
{
	int i;
	struct ioc *ioc;

	ioc = ioc_list;
	for (i = 0; i < ioc_count; i++) {
		if (ioc->hw_path == hw_path)
			return ioc;

		ioc = ioc->next;
	}

	return NULL;
}

/**
 * ccio_get_iommu - Find the iommu which controls this device
 * @dev: The parisc device.
 *
 * This function searches through the registered IOMMU's and returns
 * the appropriate IOMMU for the device based on its hardware path.
 */
void * ccio_get_iommu(const struct parisc_device *dev)
{
	dev = find_pa_parent_type(dev, HPHW_IOA);
	if (!dev)
		return NULL;

	return ccio_find_ioc(dev->hw_path);
}

#define CUJO_20_STEP	0x10000000	/* inc upper nibble */

/* Cujo 2.0 has a bug which will silently corrupt data being transferred
 * to/from certain pages. To avoid this happening, we mark these pages
 * as `used', and ensure that nothing will try to allocate from them.
 */
void ccio_cujo20_fixup(struct parisc_device *cujo, u32 iovp)
{
	unsigned int idx;
	struct parisc_device *dev = parisc_parent(cujo);
	struct ioc *ioc = ccio_get_iommu(dev);
	u8 *res_ptr;

	ioc->cujo20_bug = 1;
	res_ptr = ioc->res_map;
	idx = PDIR_INDEX(iovp) >> 3;

	while (idx < ioc->res_size) {
		res_ptr[idx] |= 0xff;
		idx += PDIR_INDEX(CUJO_20_STEP) >> 3;
	}
}

#if 0
/* GRANT - is this needed for U2 or not? */

/*
** Get the size of the I/O TLB for this I/O MMU.
**
** If spa_shift is non-zero (ie probably U2),
** then calculate the I/O TLB size using spa_shift.
**
** Otherwise we are supposed to get the IODC entry point ENTRY TLB
** and execute it. However, both U2 and Uturn firmware supplies spa_shift.
** I think only Java (K/D/R-class too?) systems don't do this.
*/
static int
ccio_get_iotlb_size(struct parisc_device *dev)
{
	if (dev->spa_shift == 0) {
		panic("%s() : Can't determine I/O TLB size.\n", __FUNCTION__);
	}
	return (1 << dev->spa_shift);
}
#else

/* Uturn supports 256 TLB entries */
#define CCIO_CHAINID_SHIFT	8
#define CCIO_CHAINID_MASK	0xff
#endif /* 0 */

/* We *can't* support JAVA (T600). Venture there at your own risk. */
static struct parisc_device_id ccio_tbl[] = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, U2_IOA_RUNWAY, 0xb }, /* U2 */
	{ HPHW_IOA, HVERSION_REV_ANY_ID, UTURN_IOA_RUNWAY, 0xb }, /* UTurn */
	{ 0, }
};

static int ccio_probe(struct parisc_device *dev);

static struct parisc_driver ccio_driver = {
	.name =		"ccio",
	.id_table =	ccio_tbl,
	.probe =	ccio_probe,
};

/**
 * ccio_ioc_init - Initialize the I/O Controller
 * @ioc: The I/O Controller.
 *
 * Initialize the I/O Controller which includes setting up the
 * I/O Page Directory, the resource map, and initializing the
 * U2/Uturn chip into virtual mode.
 */
static void
ccio_ioc_init(struct ioc *ioc)
{
	int i;
	unsigned int iov_order;
	u32 iova_space_size;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own. Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	*/

	iova_space_size = (u32) (num_physpages / count_parisc_driver(&ccio_driver));

	/* limit IOVA space size to 1MB-1GB */

	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
#ifdef __LP64__
	} else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
#endif
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	*/

	/* We could use larger page sizes in order to *decrease* the number
	** of mappings needed. (ie 8k pages means 1/2 the mappings).
	**
	** Note: Grant Grundler says "Using 8k I/O pages isn't trivial either
	**   since the pages must also be physically contiguous - typically
	**   this is the case under linux."
	*/

	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);

	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	BUG_ON(ioc->pdir_size > 8 * 1024 * 1024);   /* max pdir size <= 8MB */

	/* Verify it's a power of two */
	BUG_ON((1 << get_order(ioc->pdir_size)) != (ioc->pdir_size >> PAGE_SHIFT));

	DBG_INIT("%s() hpa 0x%p mem %luMB IOV %dMB (%d bits)\n",
			__FUNCTION__, ioc->ioc_regs,
			(unsigned long) num_physpages >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = (u64 *)__get_free_pages(GFP_KERNEL,
						 get_order(ioc->pdir_size));
	if(NULL == ioc->pdir_base) {
		panic("%s() could not allocate I/O Page Table\n", __FUNCTION__);
	}
	memset(ioc->pdir_base, 0, ioc->pdir_size);

	BUG_ON((((unsigned long)ioc->pdir_base) & PAGE_MASK) != (unsigned long)ioc->pdir_base);
	DBG_INIT(" base %p\n", ioc->pdir_base);

	/* resource map size dictated by pdir_size */
	ioc->res_size = (ioc->pdir_size / sizeof(u64)) >> 3;
	DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);

	ioc->res_map = (u8 *)__get_free_pages(GFP_KERNEL,
					      get_order(ioc->res_size));
	if(NULL == ioc->res_map) {
		panic("%s() could not allocate resource map\n", __FUNCTION__);
	}
	memset(ioc->res_map, 0, ioc->res_size);

	/* Initialize the res_hint to 16 */
	ioc->res_hint = 16;

	/* Initialize the spinlock */
	spin_lock_init(&ioc->res_lock);

	/*
	** Chainid is the upper most bits of an IOVP used to determine
	** which TLB entry an IOVP will use.
	*/
	ioc->chainid_shift = get_order(iova_space_size) + PAGE_SHIFT - CCIO_CHAINID_SHIFT;
	DBG_INIT(" chainid_shift 0x%x\n", ioc->chainid_shift);

	/*
	** Initialize IOA hardware
	*/
	WRITE_U32(CCIO_CHAINID_MASK << ioc->chainid_shift,
		  &ioc->ioc_regs->io_chain_id_mask);

	WRITE_U32(virt_to_phys(ioc->pdir_base),
		  &ioc->ioc_regs->io_pdir_base);

	/*
	** Go to "Virtual Mode"
	*/
	WRITE_U32(IOA_NORMAL_MODE, &ioc->ioc_regs->io_control);

	/*
	** Initialize all I/O TLB entries to 0 (Valid bit off).
	*/
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_m);
	WRITE_U32(0, &ioc->ioc_regs->io_tlb_entry_l);

	for(i = 1 << CCIO_CHAINID_SHIFT; i ; i--) {
		WRITE_U32((CMD_TLB_DIRECT_WRITE | (i << ioc->chainid_shift)),
			  &ioc->ioc_regs->io_command);
	}
}

static void
ccio_init_resource(struct resource *res, char *name, void __iomem *ioaddr)
{
	int result;

	res->parent = NULL;
	res->flags = IORESOURCE_MEM;
	/*
	 * The ((signed) ...) cast is required on 64-bit kernels because
	 * we only want to sign-extend the lower 16 bits of the register.
	 * The upper 16 bits of range registers are hardcoded to 0xffff.
	 */
	res->start = (unsigned long)((signed) READ_U32(ioaddr) << 16);
	res->end = (unsigned long)((signed) (READ_U32(ioaddr + 4) << 16) - 1);
	res->name = name;
	/*
	 * Check if this MMIO range is disabled
	 */
	if (res->end + 1 == res->start)
		return;

	/* On some platforms (e.g. K-Class), we have already registered
	 * resources for devices reported by firmware. Some are children
	 * of ccio.
	 * "insert" ccio ranges in the mmio hierarchy (/proc/iomem).
	 */
	result = insert_resource(&iomem_resource, res);
	if (result < 0) {
		printk(KERN_ERR "%s() failed to claim CCIO bus address space (%08lx,%08lx)\n",
			__FUNCTION__, res->start, res->end);
	}
}

static void __init ccio_init_resources(struct ioc *ioc)
{
	struct resource *res = ioc->mmio_region;
	char *name = kmalloc(14, GFP_KERNEL);

	snprintf(name, 14, "GSC Bus [%d/]", ioc->hw_path);

	ccio_init_resource(res, name, &ioc->ioc_regs->io_io_low);
	ccio_init_resource(res + 1, name, &ioc->ioc_regs->io_io_low_hv);
}

static int new_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	if (max <= min)
		return -EBUSY;

	res->start = (max - size + 1) &~ (align - 1);
	res->end = res->start + size;

	/* We might be trying to expand the MMIO range to include
	 * a child device that has already registered its MMIO space.
	 * Use "insert" instead of request_resource().
	 */
	if (!insert_resource(&iomem_resource, res))
		return 0;

	return new_ioc_area(res, size, min, max - size, align);
}

static int expand_ioc_area(struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	unsigned long start, len;

	if (!res->parent)
		return new_ioc_area(res, size, min, max, align);

	start = (res->start - size) &~ (align - 1);
	len = res->end - start + 1;
	if (start >= min) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	start = res->start;
	len = ((size + res->end + align) &~ (align - 1)) - start;
	if (start + len <= max) {
		if (!adjust_resource(res, start, len))
			return 0;
	}

	return -EBUSY;
}

/*
 * Dino calls this function. Beware that we may get called on systems
 * which have no IOC (725, B180, C160L, etc) but do have a Dino.
 * So it's legal to find no parent IOC.
 *
 * Some other issues: one of the resources in the ioc may be unassigned.
 */
int ccio_allocate_resource(const struct parisc_device *dev,
		struct resource *res, unsigned long size,
		unsigned long min, unsigned long max, unsigned long align)
{
	struct resource *parent = &iomem_resource;
	struct ioc *ioc = ccio_get_iommu(dev);
	if (!ioc)
		goto out;

	parent = ioc->mmio_region;
	if (parent->parent &&
	    !allocate_resource(parent, res, size, min, max, align, NULL, NULL))
		return 0;

	if ((parent + 1)->parent &&
	    !allocate_resource(parent + 1, res, size, min, max, align,
			       NULL, NULL))
		return 0;

	if (!expand_ioc_area(parent, size, min, max, align)) {
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high);
	} else if (!expand_ioc_area(parent + 1, size, min, max, align)) {
		parent++;
		__raw_writel(((parent->start)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_low_hv);
		__raw_writel(((parent->end)>>16) | 0xffff0000,
			     &ioc->ioc_regs->io_io_high_hv);
	} else {
		return -EBUSY;
	}

 out:
	return allocate_resource(parent, res, size, min, max, align, NULL, NULL);
}

int ccio_request_resource(const struct parisc_device *dev,
		struct resource *res)
{
	struct resource *parent;
	struct ioc *ioc = ccio_get_iommu(dev);

	if (!ioc) {
		parent = &iomem_resource;
	} else if ((ioc->mmio_region->start <= res->start) &&
		   (res->end <= ioc->mmio_region->end)) {
		parent = ioc->mmio_region;
	} else if (((ioc->mmio_region + 1)->start <= res->start) &&
		   (res->end <= (ioc->mmio_region + 1)->end)) {
		parent = ioc->mmio_region + 1;
	} else {
		return -EBUSY;
	}

	/* "transparent" bus bridges need to register the MMIO resources
	 * firmware assigned them. e.g. children of hppb.c (e.g. K-class)
	 * registered their resources in the PDC "bus walk" (See
	 * arch/parisc/kernel/inventory.c).
	 */
	return insert_resource(parent, res);
}

/**
 * ccio_probe - Determine if ccio should claim this device.
 * @dev: The device which has been found.
 *
 * Determine if ccio should claim this chip (return 0) or not (return 1).
 * If so, initialize the chip and tell other partners in crime they
 * have work to do.
 */
static int ccio_probe(struct parisc_device *dev)
{
	int i;
	struct ioc *ioc, **ioc_p = &ioc_list;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *info_entry, *bitmap_entry;
#endif

	ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
	if (ioc == NULL) {
		printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
		return 1;
	}

	ioc->name = dev->id.hversion == U2_IOA_RUNWAY ? "U2" : "UTurn";

	printk(KERN_INFO "Found %s at 0x%lx\n", ioc->name, dev->hpa.start);

	for (i = 0; i < ioc_count; i++) {
		ioc_p = &(*ioc_p)->next;
	}
	*ioc_p = ioc;

	ioc->hw_path = dev->hw_path;
	ioc->ioc_regs = ioremap_nocache(dev->hpa.start, 4096);
	ccio_ioc_init(ioc);
	ccio_init_resources(ioc);
	hppa_dma_ops = &ccio_ops;
	dev->dev.platform_data = kzalloc(sizeof(struct pci_hba_data), GFP_KERNEL);

	/* if this fails, no I/O cards will work, so may as well bug */
	BUG_ON(dev->dev.platform_data == NULL);
	HBA_DATA(dev->dev.platform_data)->iommu = ioc;

#ifdef CONFIG_PROC_FS
	if (ioc_count == 0) {
		info_entry = create_proc_entry(MODULE_NAME, 0, proc_runway_root);
		if (info_entry)
			info_entry->proc_fops = &ccio_proc_info_fops;

		bitmap_entry = create_proc_entry(MODULE_NAME"-bitmap", 0, proc_runway_root);
		if (bitmap_entry)
			bitmap_entry->proc_fops = &ccio_proc_bitmap_fops;
	}
#endif

	ioc_count++;

	parisc_vmerge_boundary = IOVP_SIZE;
	parisc_vmerge_max_size = BITS_PER_LONG * IOVP_SIZE;
	parisc_has_iommu();
	return 0;
}

/**
 * ccio_init - ccio initialization procedure.
 *
 * Register this driver.
 */
void __init ccio_init(void)
{
	register_parisc_driver(&ccio_driver);
}