/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <[email protected]>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>

#include <mach/iommu.h>

#include "iopgtable.h"

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * install_iommu_arch - Install architecture specific iommu functions
 * @ops: a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among
 * the omap series. This interface installs such an iommu algorithm.
 **/
int install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(install_iommu_arch);

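/*
 * Example (sketch, not part of this file): an arch backend such as
 * omap2's registers its callbacks once at init time.  The callback
 * names below are illustrative only:
 *
 *	static const struct iommu_functions omap2_iommu_ops = {
 *		.version	= IOMMU_ARCH_VERSION,
 *		.enable		= omap2_iommu_enable,
 *		.disable	= omap2_iommu_disable,
 *		.fault_isr	= omap2_iommu_fault_isr,
 *	};
 *
 *	install_iommu_arch(&omap2_iommu_ops);
 */
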
/**
 * uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops: a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(uninstall_iommu_arch);

/**
 * iommu_save_ctx - Save registers for pm off-mode support
 * @obj: target iommu
 **/
void iommu_save_ctx(struct iommu *obj)
{
	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_save_ctx);

/**
 * iommu_restore_ctx - Restore registers for pm off-mode support
 * @obj: target iommu
 **/
void iommu_restore_ctx(struct iommu *obj)
{
	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(iommu_restore_ctx);

/**
 * iommu_arch_version - Return running iommu arch version
 **/
u32 iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(iommu_arch_version);

static int iommu_enable(struct iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 * TLB operations
 */
void iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}
EXPORT_SYMBOL_GPL(iotlb_cr_to_virt);

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

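/*
 * The MMU_LOCK register holds two fields: "base" is the number of
 * locked-down TLB entries that the hardware will not evict, and "vict"
 * selects the victim entry targeted by the next TLB load or read.
 * This driver never locks entries down, hence the BUG_ON(base != 0)
 * checks below.
 */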
static void iotlb_lock_get(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);

	BUG_ON(l->base != 0); /* Currently no preservation is used */
}

static void iotlb_lock_set(struct iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	BUG_ON(l->base != 0); /* Currently no preservation is used */

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj: target iommu
 * @cr: contents of cam and ram register
 * @buf: output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
int load_iotlb_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int i;
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	for (i = 0; i < obj->nr_tlb_entries; i++) {
		struct cr_regs tmp;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &tmp);
		if (!iotlb_cr_valid(&tmp))
			break;
	}

	if (i == obj->nr_tlb_entries) {
		dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
		err = -EBUSY;
		goto out;
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = 0;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}
EXPORT_SYMBOL_GPL(load_iotlb_entry);

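/*
 * Example (sketch): preloading a single 4KB mapping.  The values are
 * illustrative only; the iotlb_entry fields follow "iopgtable.h":
 *
 *	struct iotlb_entry e = {
 *		.da	= 0x10000000,
 *		.pa	= 0x80000000,
 *		.pgsz	= MMU_CAM_PGSZ_4K,
 *		.valid	= 1,
 *	};
 *
 *	err = load_iotlb_entry(obj, &e);
 */
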
/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
void flush_iotlb_page(struct iommu *obj, u32 da)
{
	struct iotlb_lock l;
	int i;

	clk_enable(obj->clk);

	for (i = 0; i < obj->nr_tlb_entries; i++) {
		struct cr_regs cr;
		u32 start;
		size_t bytes;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &cr);
		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}
EXPORT_SYMBOL_GPL(flush_iotlb_page);

/**
 * flush_iotlb_range - Clear iommu tlb entries
 * @obj: target iommu
 * @start: iommu device virtual address(start)
 * @end: iommu device virtual address(end)
 *
 * Clear iommu tlb entries which include the 'start' to 'end' range.
 **/
void flush_iotlb_range(struct iommu *obj, u32 start, u32 end)
{
	u32 da = start;

	while (da < end) {
		flush_iotlb_page(obj, da);
		/* FIXME: Optimize for multiple page size */
		da += IOPTE_SIZE;
	}
}
EXPORT_SYMBOL_GPL(flush_iotlb_range);

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
void flush_iotlb_all(struct iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}
EXPORT_SYMBOL_GPL(flush_iotlb_all);

#if defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t iommu_dump_ctx(struct iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(iommu_dump_ctx);

static int __dump_tlb_entries(struct iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved, l;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &saved);
	memcpy(&l, &saved, sizeof(saved));

	for (i = 0; i < num; i++) {
		struct cr_regs tmp;

		iotlb_lock_get(obj, &l);
		l.vict = i;
		iotlb_lock_set(obj, &l);
		iotlb_read_cr(obj, &tmp);
		if (!iotlb_cr_valid(&tmp))
			continue;

		*p++ = tmp;
	}
	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * dump_tlb_entries - dump cr arrays to given buffer
 * @obj: target iommu
 * @buf: output buffer
 * @bytes: size of 'buf' in bytes
 **/
size_t dump_tlb_entries(struct iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(dump_tlb_entries);

int foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 * H/W pagetable operations
 */
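/*
 * The IOMMU walks the page table in physical memory, bypassing the CPU
 * caches, so every CPU write to a pgd/pte entry must be cleaned out to
 * memory before the IOMMU can see it.  The two helpers below clean one
 * D-cache line at a time over the touched range (cp15 c7/c10/1 is
 * "clean D-cache line by MVA").
 */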
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table has already been allocated */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

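/*
 * 16MB supersections (and, below, 64KB large pages) follow the ARM
 * descriptor format: one mapping is written into 16 consecutive
 * entries, all carrying the same physical base.
 */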
static int iopgd_alloc_super(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int iopgtable_store_entry_core(struct iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
int iopgtable_store_entry(struct iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
#ifdef PREFETCH_IOTLB
	if (!err)
		load_iotlb_entry(obj, e);
#endif
	return err;
}
EXPORT_SYMBOL_GPL(iopgtable_store_entry);

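/*
 * Example (sketch): a client typically stores a mapping and later
 * tears it down again with iopgtable_clear_entry().  Values are
 * illustrative only:
 *
 *	struct iotlb_entry e = {
 *		.da	= 0x20000000,
 *		.pa	= 0x88000000,
 *		.pgsz	= MMU_CAM_PGSZ_1M,
 *	};
 *
 *	err = iopgtable_store_entry(obj, &e);
 *	...
 *	bytes = iopgtable_clear_entry(obj, e.da);
 */
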
/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
void iopgtable_lookup_entry(struct iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (*iopgd & IOPGD_TABLE)
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}
EXPORT_SYMBOL_GPL(iopgtable_lookup_entry);

static size_t iopgtable_clear_entry_core(struct iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (*iopgd & IOPGD_TABLE) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if (*iopgd & IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
size_t iopgtable_clear_entry(struct iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}
EXPORT_SYMBOL_GPL(iopgtable_clear_entry);

static void iopgtable_clear_entry_all(struct iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (*iopgd & IOPGD_TABLE)
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 stat, da;
	u32 *iopgd, *iopte;
	int err = -EIO;
	struct iommu *obj = data;

	if (!obj->refcount)
		return IRQ_NONE;

	/* Dynamic loading TLB or PTE */
	if (obj->isr)
		err = obj->isr(obj);

	if (!err)
		return IRQ_HANDLED;

	clk_enable(obj->clk);
	stat = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (!stat)
		return IRQ_HANDLED;

	iopgd = iopgd_offset(obj, da);

	if (!(*iopgd & IOPGD_TABLE)) {
		dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x\n", __func__,
			da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		__func__, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * iommu_get - Get iommu handler
 * @name: target iommu name
 **/
struct iommu *iommu_get(const char *name)
{
	int err = -ENOMEM;
	struct device *dev;
	struct iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	mutex_lock(&obj->iommu_lock);

	if (obj->refcount++ == 0) {
		err = iommu_enable(obj);
		if (err)
			goto err_enable;
		flush_iotlb_all(obj);
	}

	if (!try_module_get(obj->owner))
		goto err_module;

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	mutex_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iommu_get);

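/*
 * Example (sketch): a typical client pairs iommu_get()/iommu_put()
 * around its use of the MMU.  The "isp" name is illustrative and must
 * match the name set in the platform data of the target device:
 *
 *	struct iommu *obj = iommu_get("isp");
 *
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	iommu_put(obj);
 */
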
/**
 * iommu_put - Put back iommu handler
 * @obj: target iommu
 **/
void iommu_put(struct iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	mutex_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	mutex_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}
EXPORT_SYMBOL_GPL(iommu_put);

/*
 * OMAP Device MMU(IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	void *p;
	int irq;
	struct iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	mutex_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}
	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	p = (void *)__get_free_pages(GFP_KERNEL, get_order(IOPGD_TABLE_SIZE));
	if (!p) {
		err = -ENOMEM;
		goto err_pgd;
	}
	memset(p, 0, IOPGD_TABLE_SIZE);
	clean_dcache_area(p, IOPGD_TABLE_SIZE);
	obj->iopgd = p;

	BUG_ON(!IS_ALIGNED((unsigned long)obj->iopgd, IOPGD_TABLE_SIZE));

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_pgd:
	free_irq(irq, obj);
err_irq:
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);
	free_pages((unsigned long)obj->iopgd, get_order(IOPGD_TABLE_SIZE));

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10;	/* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	return platform_driver_register(&omap_iommu_driver);
}
module_init(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");