Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* $Id: ioport.c,v 1.45 2001/10/30 04:54:21 davem Exp $ |
2 | * ioport.c: Simple io mapping allocator. | |
3 | * | |
4 | * Copyright (C) 1995 David S. Miller ([email protected]) | |
5 | * Copyright (C) 1995 Miguel de Icaza ([email protected]) | |
6 | * | |
7 | * 1996: sparc_free_io, 1999: ioremap()/iounmap() by Pete Zaitcev. | |
8 | * | |
9 | * 2000/01/29 | |
10 | * <rth> zait: as long as pci_alloc_consistent produces something addressable, | |
11 | * things are ok. | |
12 | * <zaitcev> rth: no, it is relevant, because get_free_pages returns you a | |
13 | * pointer into the big page mapping | |
14 | * <rth> zait: so what? | |
15 | * <rth> zait: remap_it_my_way(virt_to_phys(get_free_page())) | |
16 | * <zaitcev> Hmm | |
17 | * <zaitcev> Suppose I did this remap_it_my_way(virt_to_phys(get_free_page())). | |
18 | * So far so good. | |
19 | * <zaitcev> Now, driver calls pci_free_consistent(with result of | |
20 | * remap_it_my_way()). | |
21 | * <zaitcev> How do you find the address to pass to free_pages()? | |
22 | * <rth> zait: walk the page tables? It's only two or three level after all. | |
23 | * <rth> zait: you have to walk them anyway to remove the mapping. | |
24 | * <zaitcev> Hmm | |
25 | * <zaitcev> Sounds reasonable | |
26 | */ | |
27 | ||
3ca9fab4 | 28 | #include <linux/module.h> |
1da177e4 LT |
29 | #include <linux/sched.h> |
30 | #include <linux/kernel.h> | |
31 | #include <linux/errno.h> | |
32 | #include <linux/types.h> | |
33 | #include <linux/ioport.h> | |
34 | #include <linux/mm.h> | |
35 | #include <linux/slab.h> | |
36 | #include <linux/pci.h> /* struct pci_dev */ | |
37 | #include <linux/proc_fs.h> | |
38 | ||
39 | #include <asm/io.h> | |
40 | #include <asm/vaddrs.h> | |
41 | #include <asm/oplib.h> | |
576c352e | 42 | #include <asm/prom.h> |
3ca9fab4 | 43 | #include <asm/of_device.h> |
576c352e | 44 | #include <asm/sbus.h> |
1da177e4 LT |
45 | #include <asm/page.h> |
46 | #include <asm/pgalloc.h> | |
47 | #include <asm/dma.h> | |
48 | ||
49 | #define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ | |
50 | ||
51 | struct resource *_sparc_find_resource(struct resource *r, unsigned long); | |
52 | ||
53 | static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz); | |
54 | static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, | |
55 | unsigned long size, char *name); | |
56 | static void _sparc_free_io(struct resource *res); | |
57 | ||
58 | /* This points to the virtual memory range used for DVMA mappings */ | |
59 | static struct resource _sparc_dvma = { | |
60 | .name = "sparc_dvma", .start = DVMA_VADDR, .end = DVMA_END - 1 | |
61 | }; | |
62 | /* This points to the start of I/O mappings, visible from outside. */ | |
63 | /*ext*/ struct resource sparc_iomap = { | |
64 | .name = "sparc_iomap", .start = IOBASE_VADDR, .end = IOBASE_END - 1 | |
65 | }; | |
66 | ||
67 | /* | |
68 | * Our mini-allocator... | |
69 | * Boy this is gross! We need it because we must map I/O for | |
70 | * timers and interrupt controller before the kmalloc is available. | |
71 | */ | |
72 | ||
73 | #define XNMLN 15 | |
74 | #define XNRES 10 /* SS-10 uses 8 */ | |
75 | ||
76 | struct xresource { | |
77 | struct resource xres; /* Must be first */ | |
78 | int xflag; /* 1 == used */ | |
79 | char xname[XNMLN+1]; | |
80 | }; | |
81 | ||
82 | static struct xresource xresv[XNRES]; | |
83 | ||
84 | static struct xresource *xres_alloc(void) { | |
85 | struct xresource *xrp; | |
86 | int n; | |
87 | ||
88 | xrp = xresv; | |
89 | for (n = 0; n < XNRES; n++) { | |
90 | if (xrp->xflag == 0) { | |
91 | xrp->xflag = 1; | |
92 | return xrp; | |
93 | } | |
94 | xrp++; | |
95 | } | |
96 | return NULL; | |
97 | } | |
98 | ||
99 | static void xres_free(struct xresource *xrp) { | |
100 | xrp->xflag = 0; | |
101 | } | |
102 | ||
103 | /* | |
104 | * These are typically used in PCI drivers | |
105 | * which are trying to be cross-platform. | |
106 | * | |
107 | * Bus type is always zero on IIep. | |
108 | */ | |
109 | void __iomem *ioremap(unsigned long offset, unsigned long size) | |
110 | { | |
111 | char name[14]; | |
112 | ||
113 | sprintf(name, "phys_%08x", (u32)offset); | |
114 | return _sparc_alloc_io(0, offset, size, name); | |
115 | } | |
116 | ||
117 | /* | |
118 | * Complementary to ioremap(). | |
119 | */ | |
120 | void iounmap(volatile void __iomem *virtual) | |
121 | { | |
122 | unsigned long vaddr = (unsigned long) virtual & PAGE_MASK; | |
123 | struct resource *res; | |
124 | ||
125 | if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) { | |
126 | printk("free_io/iounmap: cannot free %lx\n", vaddr); | |
127 | return; | |
128 | } | |
129 | _sparc_free_io(res); | |
130 | ||
131 | if ((char *)res >= (char*)xresv && (char *)res < (char *)&xresv[XNRES]) { | |
132 | xres_free((struct xresource *)res); | |
133 | } else { | |
134 | kfree(res); | |
135 | } | |
136 | } | |
137 | ||
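/*
 * Usage sketch: a cross-platform PCI driver maps a register window with
 * ioremap() and releases it with iounmap().  The BAR index, window size
 * and register offset below are assumptions made up for the example.
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap(pci_resource_start(pdev, 0), 0x100);
 *      if (regs == NULL)
 *              return -ENOMEM;
 *      writel(1, regs + 0x10);
 *      ...
 *      iounmap(regs);
 */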
138 | /* | |
139 | */ | |
140 | void __iomem *sbus_ioremap(struct resource *phyres, unsigned long offset, | |
141 | unsigned long size, char *name) | |
142 | { | |
143 | return _sparc_alloc_io(phyres->flags & 0xF, | |
144 | phyres->start + offset, size, name); | |
145 | } | |
146 | ||
3ca9fab4 DM |
147 | void __iomem *of_ioremap(struct resource *res, unsigned long offset, |
148 | unsigned long size, char *name) | |
149 | { | |
150 | return _sparc_alloc_io(res->flags & 0xF, | |
151 | res->start + offset, | |
152 | size, name); | |
153 | } | |
154 | EXPORT_SYMBOL(of_ioremap); | |
155 | ||
e3a411a3 | 156 | void of_iounmap(struct resource *res, void __iomem *base, unsigned long size) |
3ca9fab4 DM |
157 | { |
158 | iounmap(base); | |
159 | } | |
160 | EXPORT_SYMBOL(of_iounmap); | |
161 | ||
1da177e4 LT |
162 | /* |
163 | */ | |
164 | void sbus_iounmap(volatile void __iomem *addr, unsigned long size) | |
165 | { | |
166 | iounmap(addr); | |
167 | } | |
168 | ||
169 | /* | |
170 | * Meat of mapping | |
171 | */ | |
172 | static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, | |
173 | unsigned long size, char *name) | |
174 | { | |
175 | static int printed_full; | |
176 | struct xresource *xres; | |
177 | struct resource *res; | |
178 | char *tack; | |
179 | int tlen; | |
180 | void __iomem *va; /* P3 diag */ | |
181 | ||
182 | if (name == NULL) name = "???"; | |
183 | ||
184 | if ((xres = xres_alloc()) != 0) { | |
185 | tack = xres->xname; | |
186 | res = &xres->xres; | |
187 | } else { | |
188 | if (!printed_full) { | |
189 | printk("ioremap: done with statics, switching to malloc\n"); | |
190 | printed_full = 1; | |
191 | } | |
192 | tlen = strlen(name); | |
193 | tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL); | |
194 | if (tack == NULL) return NULL; | |
195 | memset(tack, 0, sizeof(struct resource)); | |
196 | res = (struct resource *) tack; | |
197 | tack += sizeof (struct resource); | |
198 | } | |
199 | ||
200 | strlcpy(tack, name, XNMLN+1); | |
201 | res->name = tack; | |
202 | ||
203 | va = _sparc_ioremap(res, busno, phys, size); | |
204 | /* printk("ioremap(0x%x:%08lx[0x%lx])=%p\n", busno, phys, size, va); */ /* P3 diag */ | |
205 | return va; | |
206 | } | |
207 | ||
208 | /* | |
209 | */ | |
210 | static void __iomem * | |
211 | _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz) | |
212 | { | |
213 | unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK); | |
214 | ||
215 | if (allocate_resource(&sparc_iomap, res, | |
216 | (offset + sz + PAGE_SIZE-1) & PAGE_MASK, | |
217 | sparc_iomap.start, sparc_iomap.end, PAGE_SIZE, NULL, NULL) != 0) { | |
218 | /* Usually we cannot see printks in this case. */ | |
219 | prom_printf("alloc_io_res(%s): cannot occupy\n", | |
220 | (res->name != NULL)? res->name: "???"); | |
221 | prom_halt(); | |
222 | } | |
223 | ||
224 | pa &= PAGE_MASK; | |
225 | sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1); | |
226 | ||
d75fc8bb | 227 | return (void __iomem *)(unsigned long)(res->start + offset); |
1da177e4 LT |
228 | } |
229 | ||
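/*
 * Worked example with hypothetical numbers: _sparc_ioremap(res, 0,
 * 0xf0004010, 0x20) keeps the in-page offset 0x010, asks
 * allocate_resource() for one PAGE_SIZE chunk inside sparc_iomap, maps
 * the page at physical 0xf0004000 into it with sparc_mapiorange(), and
 * returns res->start + 0x010.
 */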
230 | /* | |
231 | * Complementary to _sparc_ioremap(). | |
232 | */ | |
233 | static void _sparc_free_io(struct resource *res) | |
234 | { | |
235 | unsigned long plen; | |
236 | ||
237 | plen = res->end - res->start + 1; | |
30d4d1ff | 238 | BUG_ON((plen & (PAGE_SIZE-1)) != 0); |
1da177e4 LT |
239 | sparc_unmapiorange(res->start, plen); |
240 | release_resource(res); | |
241 | } | |
242 | ||
243 | #ifdef CONFIG_SBUS | |
244 | ||
8fae097d DM |
245 | void sbus_set_sbus64(struct sbus_dev *sdev, int x) |
246 | { | |
1da177e4 LT |
247 | printk("sbus_set_sbus64: unsupported\n"); |
248 | } | |
249 | ||
8fae097d DM |
250 | extern unsigned int sun4d_build_irq(struct sbus_dev *sdev, int irq); |
251 | void __init sbus_fill_device_irq(struct sbus_dev *sdev) | |
252 | { | |
253 | struct linux_prom_irqs irqs[PROMINTR_MAX]; | |
254 | int len; | |
255 | ||
256 | len = prom_getproperty(sdev->prom_node, "intr", | |
257 | (char *)irqs, sizeof(irqs)); | |
258 | if (len != -1) { | |
259 | sdev->num_irqs = len / 8; | |
260 | if (sdev->num_irqs == 0) { | |
261 | sdev->irqs[0] = 0; | |
262 | } else if (sparc_cpu_model == sun4d) { | |
263 | for (len = 0; len < sdev->num_irqs; len++) | |
264 | sdev->irqs[len] = | |
265 | sun4d_build_irq(sdev, irqs[len].pri); | |
266 | } else { | |
267 | for (len = 0; len < sdev->num_irqs; len++) | |
268 | sdev->irqs[len] = irqs[len].pri; | |
269 | } | |
270 | } else { | |
271 | int interrupts[PROMINTR_MAX]; | |
272 | ||
273 | /* No "intr" node found-- check for "interrupts" node. | |
274 | * This node contains SBus interrupt levels, not IPLs | |
275 | * as in "intr", and no vector values. We convert | |
276 | * SBus interrupt levels to PILs (platform specific). | |
277 | */ | |
278 | len = prom_getproperty(sdev->prom_node, "interrupts", | |
279 | (char *)interrupts, sizeof(interrupts)); | |
280 | if (len == -1) { | |
281 | sdev->irqs[0] = 0; | |
282 | sdev->num_irqs = 0; | |
283 | } else { | |
284 | sdev->num_irqs = len / sizeof(int); | |
285 | for (len = 0; len < sdev->num_irqs; len++) { | |
286 | sdev->irqs[len] = | |
287 | sbint_to_irq(sdev, interrupts[len]); | |
288 | } | |
289 | } | |
290 | } | |
291 | } | |
292 | ||
1da177e4 LT |
293 | /* |
294 | * Allocate a chunk of memory suitable for DMA. | |
295 | * Typically devices use them for control blocks. | |
296 | * CPU may access them without any explicit flushing. | |
297 | * | |
298 | * XXX Some clever people know that sdev is not used and supply NULL. Watch. | |
299 | */ | |
300 | void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp) | |
301 | { | |
302 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; | |
303 | unsigned long va; | |
304 | struct resource *res; | |
305 | int order; | |
306 | ||
307 | /* XXX why are some lengths signed, others unsigned? */ | |
308 | if (len <= 0) { | |
309 | return NULL; | |
310 | } | |
311 | /* XXX So what is maxphys for us and how do drivers know it? */ | |
312 | if (len > 256*1024) { /* __get_free_pages() limit */ | |
313 | return NULL; | |
314 | } | |
315 | ||
316 | order = get_order(len_total); | |
f3d48f03 | 317 | if ((va = __get_free_pages(GFP_KERNEL|__GFP_COMP, order)) == 0) |
1da177e4 LT |
318 | goto err_nopages; |
319 | ||
c80892d1 | 320 | if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) |
1da177e4 | 321 | goto err_nomem; |
1da177e4 LT |
322 | |
323 | if (allocate_resource(&_sparc_dvma, res, len_total, | |
324 | _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { | |
325 | printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total); | |
326 | goto err_nova; | |
327 | } | |
328 | mmu_inval_dma_area(va, len_total); | |
329 | // XXX The mmu_map_dma_area does this for us below, see comments. | |
330 | // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total); | |
331 | /* | |
332 | * XXX That's where sdev would be used. Currently we load | |
333 | * all iommu tables with the same translations. | |
334 | */ | |
335 | if (mmu_map_dma_area(dma_addrp, va, res->start, len_total) != 0) | |
336 | goto err_noiommu; | |
337 | ||
4cfbd7eb MH |
338 | /* Set the resource name, if known. */ |
339 | if (sdev) { | |
340 | res->name = sdev->prom_name; | |
341 | } | |
342 | ||
d75fc8bb | 343 | return (void *)(unsigned long)res->start; |
1da177e4 LT |
344 | |
345 | err_noiommu: | |
346 | release_resource(res); | |
347 | err_nova: | |
348 | free_pages(va, order); | |
349 | err_nomem: | |
350 | kfree(res); | |
351 | err_nopages: | |
352 | return NULL; | |
353 | } | |
354 | ||
355 | void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba) | |
356 | { | |
357 | struct resource *res; | |
358 | struct page *pgv; | |
359 | ||
360 | if ((res = _sparc_find_resource(&_sparc_dvma, | |
361 | (unsigned long)p)) == NULL) { | |
362 | printk("sbus_free_consistent: cannot free %p\n", p); | |
363 | return; | |
364 | } | |
365 | ||
366 | if (((unsigned long)p & (PAGE_SIZE-1)) != 0) { | |
367 | printk("sbus_free_consistent: unaligned va %p\n", p); | |
368 | return; | |
369 | } | |
370 | ||
371 | n = (n + PAGE_SIZE-1) & PAGE_MASK; | |
372 | if ((res->end-res->start)+1 != n) { | |
373 | printk("sbus_free_consistent: region 0x%lx asked 0x%lx\n", | |
374 | (long)((res->end-res->start)+1), n); | |
375 | return; | |
376 | } | |
377 | ||
378 | release_resource(res); | |
379 | kfree(res); | |
380 | ||
381 | /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */ | |
382 | pgv = mmu_translate_dvma(ba); | |
383 | mmu_unmap_dma_area(ba, n); | |
384 | ||
385 | __free_pages(pgv, get_order(n)); | |
386 | } | |
387 | ||
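/*
 * Usage sketch: a hypothetical SBus driver allocates a DMA-consistent
 * control block, hands the returned bus address to its hardware, and
 * later frees it with the same length.  The structure name is an
 * assumption made up for the example.
 *
 *      struct my_ctrl *cb;
 *      u32 cb_dvma;
 *
 *      cb = sbus_alloc_consistent(sdev, sizeof(*cb), &cb_dvma);
 *      if (cb == NULL)
 *              return -ENOMEM;
 *      ...
 *      sbus_free_consistent(sdev, sizeof(*cb), cb, cb_dvma);
 */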
388 | /* | |
389 | * Map a chunk of memory so that devices can see it. | |
390 | * CPU view of this memory may be inconsistent with | |
391 | * a device view and explicit flushing is necessary. | |
392 | */ | |
393 | dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction) | |
394 | { | |
395 | /* XXX why are some lengths signed, others unsigned? */ | |
396 | if (len <= 0) { | |
397 | return 0; | |
398 | } | |
399 | /* XXX So what is maxphys for us and how do drivers know it? */ | |
400 | if (len > 256*1024) { /* __get_free_pages() limit */ | |
401 | return 0; | |
402 | } | |
403 | return mmu_get_scsi_one(va, len, sdev->bus); | |
404 | } | |
405 | ||
406 | void sbus_unmap_single(struct sbus_dev *sdev, dma_addr_t ba, size_t n, int direction) | |
407 | { | |
408 | mmu_release_scsi_one(ba, n, sdev->bus); | |
409 | } | |
410 | ||
411 | int sbus_map_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction) | |
412 | { | |
413 | mmu_get_scsi_sgl(sg, n, sdev->bus); | |
414 | ||
415 | /* | |
416 | * XXX sparc64 can return a partial length here. sun4c should do this | |
417 | * but it currently panics if it can't fulfill the request - Anton | |
418 | */ | |
419 | return n; | |
420 | } | |
421 | ||
422 | void sbus_unmap_sg(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction) | |
423 | { | |
424 | mmu_release_scsi_sgl(sg, n, sdev->bus); | |
425 | } | |
426 | ||
427 | /* | |
428 | */ | |
429 | void sbus_dma_sync_single_for_cpu(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction) | |
430 | { | |
431 | #if 0 | |
432 | unsigned long va; | |
433 | struct resource *res; | |
434 | ||
435 | /* We do not need the resource, just panic if the address is invalid. */ | |
436 | res = _sparc_find_resource(&_sparc_dvma, ba); | |
437 | if (res == NULL) | |
438 | panic("sbus_dma_sync_single: 0x%x\n", ba); | |
439 | ||
440 | va = page_address(mmu_translate_dvma(ba)); /* XXX highmem */ | |
441 | /* | |
442 | * XXX This bogosity will be fixed with the iommu rewrite coming soon | |
443 | * to a kernel near you. - Anton | |
444 | */ | |
445 | /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */ | |
446 | #endif | |
447 | } | |
448 | ||
449 | void sbus_dma_sync_single_for_device(struct sbus_dev *sdev, dma_addr_t ba, size_t size, int direction) | |
450 | { | |
451 | #if 0 | |
452 | unsigned long va; | |
453 | struct resource *res; | |
454 | ||
455 | /* We do not need the resource, just panic if the address is invalid. */ | |
456 | res = _sparc_find_resource(&_sparc_dvma, ba); | |
457 | if (res == NULL) | |
458 | panic("sbus_dma_sync_single: 0x%x\n", ba); | |
459 | ||
460 | va = page_address(mmu_translate_dvma(ba)); /* XXX highmem */ | |
461 | /* | |
462 | * XXX This bogosity will be fixed with the iommu rewrite coming soon | |
463 | * to a kernel near you. - Anton | |
464 | */ | |
465 | /* mmu_inval_dma_area(va, (size + PAGE_SIZE-1) & PAGE_MASK); */ | |
466 | #endif | |
467 | } | |
468 | ||
469 | void sbus_dma_sync_sg_for_cpu(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction) | |
470 | { | |
471 | printk("sbus_dma_sync_sg_for_cpu: not implemented yet\n"); | |
472 | } | |
473 | ||
474 | void sbus_dma_sync_sg_for_device(struct sbus_dev *sdev, struct scatterlist *sg, int n, int direction) | |
475 | { | |
476 | printk("sbus_dma_sync_sg_for_device: not implemented yet\n"); | |
477 | } | |
576c352e DM |
478 | |
479 | /* Support code for sbus_init(). */ | |
480 | /* | |
481 | * XXX This function appears to be a distorted version of | |
482 | * prom_sbus_ranges_init(), with all sun4d stuff cut away. | |
483 | * Ask DaveM what is going on here, how is sun4d supposed to work... XXX | |
484 | */ | |
485 | /* added back sun4d patch from Thomas Bogendoerfer - should be OK (crn) */ | |
486 | void __init sbus_arch_bus_ranges_init(struct device_node *pn, struct sbus_bus *sbus) | |
487 | { | |
488 | int parent_node = pn->node; | |
489 | ||
490 | if (sparc_cpu_model == sun4d) { | |
491 | struct linux_prom_ranges iounit_ranges[PROMREG_MAX]; | |
492 | int num_iounit_ranges, len; | |
493 | ||
494 | len = prom_getproperty(parent_node, "ranges", | |
495 | (char *) iounit_ranges, | |
496 | sizeof (iounit_ranges)); | |
497 | if (len != -1) { | |
498 | num_iounit_ranges = | |
499 | (len / sizeof(struct linux_prom_ranges)); | |
500 | prom_adjust_ranges(sbus->sbus_ranges, | |
501 | sbus->num_sbus_ranges, | |
502 | iounit_ranges, num_iounit_ranges); | |
503 | } | |
504 | } | |
505 | } | |
506 | ||
507 | void __init sbus_setup_iommu(struct sbus_bus *sbus, struct device_node *dp) | |
508 | { | |
5932ef07 | 509 | #ifndef CONFIG_SUN4 |
576c352e DM |
510 | struct device_node *parent = dp->parent; |
511 | ||
512 | if (sparc_cpu_model != sun4d && | |
513 | parent != NULL && | |
514 | !strcmp(parent->name, "iommu")) { | |
515 | extern void iommu_init(int iommu_node, struct sbus_bus *sbus); | |
516 | ||
517 | iommu_init(parent->node, sbus); | |
518 | } | |
519 | ||
520 | if (sparc_cpu_model == sun4d) { | |
521 | extern void iounit_init(int sbi_node, int iounit_node, | |
522 | struct sbus_bus *sbus); | |
523 | ||
524 | iounit_init(dp->node, parent->node, sbus); | |
525 | } | |
5932ef07 | 526 | #endif |
576c352e DM |
527 | } |
528 | ||
529 | void __init sbus_setup_arch_props(struct sbus_bus *sbus, struct device_node *dp) | |
530 | { | |
531 | if (sparc_cpu_model == sun4d) { | |
532 | struct device_node *parent = dp->parent; | |
533 | ||
534 | sbus->devid = of_getintprop_default(parent, "device-id", 0); | |
535 | sbus->board = of_getintprop_default(parent, "board#", 0); | |
536 | } | |
537 | } | |
538 | ||
539 | int __init sbus_arch_preinit(void) | |
540 | { | |
541 | extern void register_proc_sparc_ioport(void); | |
542 | ||
543 | register_proc_sparc_ioport(); | |
544 | ||
545 | #ifdef CONFIG_SUN4 | |
546 | { | |
547 | extern void sun4_dvma_init(void); | |
548 | sun4_dvma_init(); | |
549 | } | |
550 | return 1; | |
551 | #else | |
552 | return 0; | |
553 | #endif | |
554 | } | |
555 | ||
556 | void __init sbus_arch_postinit(void) | |
557 | { | |
558 | if (sparc_cpu_model == sun4d) { | |
559 | extern void sun4d_init_sbi_irq(void); | |
560 | sun4d_init_sbi_irq(); | |
561 | } | |
562 | } | |
1da177e4 LT |
563 | #endif /* CONFIG_SBUS */ |
564 | ||
565 | #ifdef CONFIG_PCI | |
566 | ||
567 | /* Allocate and map kernel buffer using consistent mode DMA for a device. | |
568 | * hwdev should be a valid struct pci_dev pointer for PCI devices. | |
569 | */ | |
570 | void *pci_alloc_consistent(struct pci_dev *pdev, size_t len, dma_addr_t *pba) | |
571 | { | |
572 | unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; | |
573 | unsigned long va; | |
574 | struct resource *res; | |
575 | int order; | |
576 | ||
577 | if (len == 0) { | |
578 | return NULL; | |
579 | } | |
580 | if (len > 256*1024) { /* __get_free_pages() limit */ | |
581 | return NULL; | |
582 | } | |
583 | ||
584 | order = get_order(len_total); | |
585 | va = __get_free_pages(GFP_KERNEL, order); | |
586 | if (va == 0) { | |
587 | printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT); | |
588 | return NULL; | |
589 | } | |
590 | ||
c80892d1 | 591 | if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { |
1da177e4 LT |
592 | free_pages(va, order); |
593 | printk("pci_alloc_consistent: no core\n"); | |
594 | return NULL; | |
595 | } | |
1da177e4 LT |
596 | |
597 | if (allocate_resource(&_sparc_dvma, res, len_total, | |
598 | _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { | |
599 | printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total); | |
600 | free_pages(va, order); | |
601 | kfree(res); | |
602 | return NULL; | |
603 | } | |
604 | mmu_inval_dma_area(va, len_total); | |
605 | #if 0 | |
606 | /* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n", | |
607 | (long)va, (long)res->start, (long)virt_to_phys(va), len_total); | |
608 | #endif | |
609 | sparc_mapiorange(0, virt_to_phys(va), res->start, len_total); | |
610 | ||
611 | *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ | |
612 | return (void *) res->start; | |
613 | } | |
614 | ||
615 | /* Free and unmap a consistent DMA buffer. | |
616 | * cpu_addr is what was returned from pci_alloc_consistent, | |
617 | * size must be the same as what was passed into pci_alloc_consistent, | |
618 | * and likewise dma_addr must be the same as what *dma_addrp was set to. | |
619 | * | |
620 | * References to the memory and mappings associated with cpu_addr/dma_addr | |
621 | * past this call are illegal. | |
622 | */ | |
623 | void pci_free_consistent(struct pci_dev *pdev, size_t n, void *p, dma_addr_t ba) | |
624 | { | |
625 | struct resource *res; | |
626 | unsigned long pgp; | |
627 | ||
628 | if ((res = _sparc_find_resource(&_sparc_dvma, | |
629 | (unsigned long)p)) == NULL) { | |
630 | printk("pci_free_consistent: cannot free %p\n", p); | |
631 | return; | |
632 | } | |
633 | ||
634 | if (((unsigned long)p & (PAGE_SIZE-1)) != 0) { | |
635 | printk("pci_free_consistent: unaligned va %p\n", p); | |
636 | return; | |
637 | } | |
638 | ||
639 | n = (n + PAGE_SIZE-1) & PAGE_MASK; | |
640 | if ((res->end-res->start)+1 != n) { | |
641 | printk("pci_free_consistent: region 0x%lx asked 0x%lx\n", | |
642 | (long)((res->end-res->start)+1), (long)n); | |
643 | return; | |
644 | } | |
645 | ||
646 | pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */ | |
647 | mmu_inval_dma_area(pgp, n); | |
648 | sparc_unmapiorange((unsigned long)p, n); | |
649 | ||
650 | release_resource(res); | |
651 | kfree(res); | |
652 | ||
653 | free_pages(pgp, get_order(n)); | |
654 | } | |
655 | ||
656 | /* Map a single buffer of the indicated size for DMA in streaming mode. | |
657 | * The 32-bit bus address to use is returned. | |
658 | * | |
659 | * Once the device is given the dma address, the device owns this memory | |
660 | * until either pci_unmap_single or pci_dma_sync_single_* is performed. | |
661 | */ | |
662 | dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, | |
663 | int direction) | |
664 | { | |
30d4d1ff | 665 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
666 | /* IIep is write-through, not flushing. */ |
667 | return virt_to_phys(ptr); | |
668 | } | |
669 | ||
670 | /* Unmap a single streaming mode DMA translation. The dma_addr and size | |
671 | * must match what was provided for in a previous pci_map_single call. All | |
672 | * other usages are undefined. | |
673 | * | |
674 | * After this call, reads by the cpu to the buffer are guaranteed to see | |
675 | * whatever the device wrote there. | |
676 | */ | |
677 | void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t ba, size_t size, | |
678 | int direction) | |
679 | { | |
30d4d1ff | 680 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
681 | if (direction != PCI_DMA_TODEVICE) { |
682 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | |
683 | (size + PAGE_SIZE-1) & PAGE_MASK); | |
684 | } | |
685 | } | |
686 | ||
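/*
 * Usage sketch: streaming DMA on a hypothetical receive buffer.  The
 * buffer pointer and length are assumptions made up for the example;
 * the mapping is dropped before the CPU reads the data, as described
 * above.
 *
 *      dma_addr_t ba;
 *
 *      ba = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
 *      ... hand ba to the device and wait for the transfer ...
 *      pci_unmap_single(pdev, ba, len, PCI_DMA_FROMDEVICE);
 */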
687 | /* | |
688 | * Same as pci_map_single, but with pages. | |
689 | */ | |
690 | dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page, | |
691 | unsigned long offset, size_t size, int direction) | |
692 | { | |
30d4d1ff | 693 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
694 | /* IIep is write-through, not flushing. */ |
695 | return page_to_phys(page) + offset; | |
696 | } | |
697 | ||
698 | void pci_unmap_page(struct pci_dev *hwdev, | |
699 | dma_addr_t dma_address, size_t size, int direction) | |
700 | { | |
30d4d1ff | 701 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
702 | /* mmu_inval_dma_area XXX */ |
703 | } | |
704 | ||
705 | /* Map a set of buffers described by scatterlist in streaming | |
706 | * mode for DMA. This is the scatter-gather version of the | |
707 | * above pci_map_single interface. Here the scatter gather list | |
708 | * elements are each tagged with the appropriate dma address | |
709 | * and length. They are obtained via sg_dma_{address,length}(SG). | |
710 | * | |
711 | * NOTE: An implementation may be able to use a smaller number of | |
712 | * DMA address/length pairs than there are SG table elements. | |
713 | * (for example via virtual mapping capabilities) | |
714 | * The routine returns the number of addr/length pairs actually | |
715 | * used, at most nents. | |
716 | * | |
717 | * Device ownership issues as mentioned above for pci_map_single are | |
718 | * the same here. | |
719 | */ | |
720 | int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, | |
721 | int direction) | |
722 | { | |
723 | int n; | |
724 | ||
30d4d1ff | 725 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
726 | /* IIep is write-through, not flushing. */ |
727 | for (n = 0; n < nents; n++) { | |
30d4d1ff | 728 | BUG_ON(page_address(sg->page) == NULL); |
dbc1333a JA |
729 | sg->dvma_address = |
730 | virt_to_phys(page_address(sg->page)) + sg->offset; | |
1da177e4 LT |
731 | sg->dvma_length = sg->length; |
732 | sg++; | |
733 | } | |
734 | return nents; | |
735 | } | |
736 | ||
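/*
 * Usage sketch: after pci_map_sg() a hypothetical driver programs its
 * hardware only from the returned address/length pairs, read back with
 * the sg_dma_address()/sg_dma_len() accessors.  program_device() is an
 * assumption made up for the example.
 *
 *      int i, count;
 *
 *      count = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
 *      for (i = 0; i < count; i++)
 *              program_device(sg_dma_address(&sg[i]), sg_dma_len(&sg[i]));
 *      ...
 *      pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
 */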
737 | /* Unmap a set of streaming mode DMA translations. | |
738 | * Again, cpu read rules concerning calls here are the same as for | |
739 | * pci_unmap_single() above. | |
740 | */ | |
741 | void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, | |
742 | int direction) | |
743 | { | |
744 | int n; | |
745 | ||
30d4d1ff | 746 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
747 | if (direction != PCI_DMA_TODEVICE) { |
748 | for (n = 0; n < nents; n++) { | |
30d4d1ff | 749 | BUG_ON(page_address(sg->page) == NULL); |
1da177e4 LT |
750 | mmu_inval_dma_area( |
751 | (unsigned long) page_address(sg->page), | |
752 | (sg->length + PAGE_SIZE-1) & PAGE_MASK); | |
753 | sg++; | |
754 | } | |
755 | } | |
756 | } | |
757 | ||
758 | /* Make physical memory consistent for a single | |
759 | * streaming mode DMA translation before or after a transfer. | |
760 | * | |
761 | * If you perform a pci_map_single() but wish to interrogate the | |
762 | * buffer using the cpu, yet do not wish to tear down the PCI dma | |
763 | * mapping, you must call this function before doing so. At the | |
764 | * next point you give the PCI dma address back to the card, you | |
765 | * must first perform a pci_dma_sync_for_device, and then the | |
766 | * device again owns the buffer. | |
767 | */ | |
768 | void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | |
769 | { | |
30d4d1ff | 770 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
771 | if (direction != PCI_DMA_TODEVICE) { |
772 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | |
773 | (size + PAGE_SIZE-1) & PAGE_MASK); | |
774 | } | |
775 | } | |
776 | ||
777 | void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t size, int direction) | |
778 | { | |
30d4d1ff | 779 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
780 | if (direction != PCI_DMA_TODEVICE) { |
781 | mmu_inval_dma_area((unsigned long)phys_to_virt(ba), | |
782 | (size + PAGE_SIZE-1) & PAGE_MASK); | |
783 | } | |
784 | } | |
785 | ||
786 | /* Make physical memory consistent for a set of streaming | |
787 | * mode DMA translations after a transfer. | |
788 | * | |
789 | * The same as pci_dma_sync_single_* but for a scatter-gather list, | |
790 | * same rules and usage. | |
791 | */ | |
792 | void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) | |
793 | { | |
794 | int n; | |
795 | ||
30d4d1ff | 796 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
797 | if (direction != PCI_DMA_TODEVICE) { |
798 | for (n = 0; n < nents; n++) { | |
30d4d1ff | 799 | BUG_ON(page_address(sg->page) == NULL); |
1da177e4 LT |
800 | mmu_inval_dma_area( |
801 | (unsigned long) page_address(sg->page), | |
802 | (sg->length + PAGE_SIZE-1) & PAGE_MASK); | |
803 | sg++; | |
804 | } | |
805 | } | |
806 | } | |
807 | ||
808 | void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction) | |
809 | { | |
810 | int n; | |
811 | ||
30d4d1ff | 812 | BUG_ON(direction == PCI_DMA_NONE); |
1da177e4 LT |
813 | if (direction != PCI_DMA_TODEVICE) { |
814 | for (n = 0; n < nents; n++) { | |
30d4d1ff | 815 | BUG_ON(page_address(sg->page) == NULL); |
1da177e4 LT |
816 | mmu_inval_dma_area( |
817 | (unsigned long) page_address(sg->page), | |
818 | (sg->length + PAGE_SIZE-1) & PAGE_MASK); | |
819 | sg++; | |
820 | } | |
821 | } | |
822 | } | |
823 | #endif /* CONFIG_PCI */ | |
824 | ||
825 | #ifdef CONFIG_PROC_FS | |
826 | ||
827 | static int | |
828 | _sparc_io_get_info(char *buf, char **start, off_t fpos, int length, int *eof, | |
829 | void *data) | |
830 | { | |
831 | char *p = buf, *e = buf + length; | |
832 | struct resource *r; | |
833 | const char *nm; | |
834 | ||
835 | for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) { | |
836 | if (p + 32 >= e) /* Better than nothing */ | |
837 | break; | |
838 | if ((nm = r->name) == 0) nm = "???"; | |
685143ac GKH |
839 | p += sprintf(p, "%016llx-%016llx: %s\n", |
840 | (unsigned long long)r->start, | |
841 | (unsigned long long)r->end, nm); | |
1da177e4 LT |
842 | } |
843 | ||
844 | return p-buf; | |
845 | } | |
846 | ||
847 | #endif /* CONFIG_PROC_FS */ | |
848 | ||
849 | /* | |
850 | * This is a version of find_resource and it belongs to kernel/resource.c. | |
851 | * Until we have agreement with Linus and Martin, it lingers here. | |
852 | * | |
853 | * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case. | |
854 | * This probably warrants some sort of hashing. | |
855 | */ | |
856 | struct resource * | |
857 | _sparc_find_resource(struct resource *root, unsigned long hit) | |
858 | { | |
859 | struct resource *tmp; | |
860 | ||
861 | for (tmp = root->child; tmp != 0; tmp = tmp->sibling) { | |
862 | if (tmp->start <= hit && tmp->end >= hit) | |
863 | return tmp; | |
864 | } | |
865 | return NULL; | |
866 | } | |
867 | ||
868 | void register_proc_sparc_ioport(void) | |
869 | { | |
870 | #ifdef CONFIG_PROC_FS | |
871 | create_proc_read_entry("io_map",0,NULL,_sparc_io_get_info,&sparc_iomap); | |
872 | create_proc_read_entry("dvma_map",0,NULL,_sparc_io_get_info,&_sparc_dvma); | |
873 | #endif | |
874 | } |