/*
 * arch/sh/mm/pmb.c
 *
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <asm/cacheflush.h>
#include <linux/sizes.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>

struct pmb_entry;

struct pmb_entry {
	unsigned long vpn;
	unsigned long ppn;
	unsigned long flags;
	unsigned long size;

	raw_spinlock_t lock;

	/*
	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
	 * PMB_NO_ENTRY to search for a free one
	 */
	int entry;

	/* Adjacent entry link for contiguous multi-entry mappings */
	struct pmb_entry *link;
};

static struct {
	unsigned long size;
	int flag;
} pmb_sizes[] = {
	{ .size = SZ_512M, .flag = PMB_SZ_512M, },
	{ .size = SZ_128M, .flag = PMB_SZ_128M, },
	{ .size = SZ_64M,  .flag = PMB_SZ_64M,  },
	{ .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};

static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;

static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
	return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
	return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}

/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
	unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
	flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
	flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
	flags |= PMB_C;
#endif

	return flags;
}

/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
	unsigned long pmb_flags = 0;
	u64 flags = pgprot_val(prot);

	if (flags & _PAGE_CACHABLE)
		pmb_flags |= PMB_C;
	if (flags & _PAGE_WT)
		pmb_flags |= PMB_WT | PMB_UB;

	return pmb_flags;
}

static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
	return (b->vpn == (a->vpn + a->size)) &&
	       (b->ppn == (a->ppn + a->size)) &&
	       (b->flags == a->flags);
}

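/*
 * Check if an existing (potentially compound) mapping already covers
 * the requested virtual/physical range.
 */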
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
			       unsigned long size)
{
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe, *iter;
		unsigned long span;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * See if VPN and PPN are bounded by an existing mapping.
		 */
		if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
			continue;
		if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
			continue;

		/*
		 * Now see if we're in range of a simple mapping.
		 */
		if (size <= pmbe->size) {
			read_unlock(&pmb_rwlock);
			return true;
		}

		span = pmbe->size;

		/*
		 * Finally for sizes that involve compound mappings, walk
		 * the chain.
		 */
		for (iter = pmbe->link; iter; iter = iter->link)
			span += iter->size;

		/*
		 * Nothing else to do if the range requirements are met.
		 */
		if (size <= span) {
			read_unlock(&pmb_rwlock);
			return true;
		}
	}

	read_unlock(&pmb_rwlock);
	return false;
}

static bool pmb_size_valid(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return true;

	return false;
}

static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
	return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}

static inline bool pmb_prot_valid(pgprot_t prot)
{
	return (pgprot_val(prot) & _PAGE_USER) == 0;
}

static int pmb_size_to_flags(unsigned long size)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (pmb_sizes[i].size == size)
			return pmb_sizes[i].flag;

	return 0;
}

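/*
 * Claim the first free slot in the PMB entry bitmap, returning its
 * index or -ENOSPC when all slots are taken. The caller must hold
 * pmb_rwlock for writing, as the bitmap update is non-atomic.
 */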
static int pmb_alloc_entry(void)
{
	int pos;

	pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
	if (pos >= 0 && pos < NR_PMB_ENTRIES)
		__set_bit(pos, pmb_map);
	else
		pos = -ENOSPC;

	return pos;
}

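/*
 * Allocate and initialize a software PMB entry, either at a specific
 * hardware slot or, with PMB_NO_ENTRY, at the first free one. Returns
 * an ERR_PTR() value on failure.
 */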
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
				   unsigned long flags, int entry)
{
	struct pmb_entry *pmbe;
	unsigned long irqflags;
	void *ret = NULL;
	int pos;

	write_lock_irqsave(&pmb_rwlock, irqflags);

	if (entry == PMB_NO_ENTRY) {
		pos = pmb_alloc_entry();
		if (unlikely(pos < 0)) {
			ret = ERR_PTR(pos);
			goto out;
		}
	} else {
		if (__test_and_set_bit(entry, pmb_map)) {
			ret = ERR_PTR(-ENOSPC);
			goto out;
		}

		pos = entry;
	}

	write_unlock_irqrestore(&pmb_rwlock, irqflags);

	pmbe = &pmb_entry_list[pos];

	memset(pmbe, 0, sizeof(struct pmb_entry));

	raw_spin_lock_init(&pmbe->lock);

	pmbe->vpn	= vpn;
	pmbe->ppn	= ppn;
	pmbe->flags	= flags;
	pmbe->entry	= pos;

	return pmbe;

out:
	write_unlock_irqrestore(&pmb_rwlock, irqflags);
	return ret;
}

static void pmb_free(struct pmb_entry *pmbe)
{
	__clear_bit(pmbe->entry, pmb_map);

	pmbe->entry	= PMB_NO_ENTRY;
	pmbe->link	= NULL;
}

/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	jump_to_uncached();

	/* Set V-bit */
	__raw_writel(pmbe->vpn | PMB_V, addr);
	__raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

	back_to_cached();
}

static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long addr, data;
	unsigned long addr_val, data_val;

	addr = mk_pmb_addr(pmbe->entry);
	data = mk_pmb_data(pmbe->entry);

	addr_val = __raw_readl(addr);
	data_val = __raw_readl(data);

	/* Clear V-bit */
	writel_uncached(addr_val & ~PMB_V, addr);
	writel_uncached(data_val & ~PMB_V, data);
}

#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmbe->lock, flags);
	__set_pmb_entry(pmbe);
	raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */

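/*
 * Establish a bolted (permanent) kernel mapping. The range is carved
 * up greedily, largest PMB page size first, and adjacent entries are
 * linked so that compound mappings can be torn down as a unit.
 */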
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
		     unsigned long size, pgprot_t prot)
{
	struct pmb_entry *pmbp, *pmbe;
	unsigned long orig_addr, orig_size;
	unsigned long flags, pmb_flags;
	int i, mapped;

	if (size < SZ_16M)
		return -EINVAL;
	if (!pmb_addr_valid(vaddr, size))
		return -EFAULT;
	if (pmb_mapping_exists(vaddr, phys, size))
		return 0;

	orig_addr = vaddr;
	orig_size = size;

	flush_tlb_kernel_range(vaddr, vaddr + size);

	pmb_flags = pgprot_to_pmb_flags(prot);
	pmbp = NULL;

	do {
		for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
			if (size < pmb_sizes[i].size)
				continue;

			pmbe = pmb_alloc(vaddr, phys, pmb_flags |
					 pmb_sizes[i].flag, PMB_NO_ENTRY);
			if (IS_ERR(pmbe)) {
				pmb_unmap_entry(pmbp, mapped);
				return PTR_ERR(pmbe);
			}

			raw_spin_lock_irqsave(&pmbe->lock, flags);

			pmbe->size = pmb_sizes[i].size;

			__set_pmb_entry(pmbe);

			phys	+= pmbe->size;
			vaddr	+= pmbe->size;
			size	-= pmbe->size;

			/*
			 * Link adjacent entries that span multiple PMB
			 * entries for easier tear-down.
			 */
			if (likely(pmbp)) {
				raw_spin_lock_nested(&pmbp->lock,
						     SINGLE_DEPTH_NESTING);
				pmbp->link = pmbe;
				raw_spin_unlock(&pmbp->lock);
			}

			pmbp = pmbe;

			/*
			 * Instead of trying smaller sizes on every
			 * iteration (even if we succeed in allocating
			 * space), try using pmb_sizes[i].size again.
			 */
			i--;
			mapped++;

			raw_spin_unlock_irqrestore(&pmbe->lock, flags);
		}
	} while (size >= SZ_16M);

	flush_cache_vmap(orig_addr, orig_addr + orig_size);

	return 0;
}

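/*
 * PMB-backed ioremap(): align the physical range to the best-fitting
 * PMB page size, reserve a virtual area, and bolt the mapping in. The
 * offset into the first page is folded back into the returned cookie.
 */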
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
			       pgprot_t prot, void *caller)
{
	unsigned long vaddr;
	phys_addr_t offset, last_addr;
	phys_addr_t align_mask;
	unsigned long aligned;
	struct vm_struct *area;
	int i, ret;

	if (!pmb_iomapping_enabled)
		return NULL;

	/*
	 * Small mappings need to go through the TLB.
	 */
	if (size < SZ_16M)
		return ERR_PTR(-EINVAL);
	if (!pmb_prot_valid(prot))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
		if (size >= pmb_sizes[i].size)
			break;

	last_addr = phys + size;
	align_mask = ~(pmb_sizes[i].size - 1);
	offset = phys & ~align_mask;
	phys &= align_mask;
	aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

	/*
	 * XXX: This should really start from uncached_end, but this
	 * causes the MMU to reset, so for now we restrict it to the
	 * 0xb000...0xc000 range.
	 */
	area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
				    P3SEG, caller);
	if (!area)
		return NULL;

	area->phys_addr = phys;
	vaddr = (unsigned long)area->addr;

	ret = pmb_bolt_mapping(vaddr, phys, size, prot);
	if (unlikely(ret != 0))
		return ERR_PTR(ret);

	return (void __iomem *)(offset + (char *)vaddr);
}

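/*
 * Tear down the mapping whose head entry matches the given virtual
 * address, following any compound links.
 */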
int pmb_unmap(void __iomem *addr)
{
	struct pmb_entry *pmbe = NULL;
	unsigned long vaddr = (unsigned long __force)addr;
	int i, found = 0;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			if (pmbe->vpn == vaddr) {
				found = 1;
				break;
			}
		}
	}

	read_unlock(&pmb_rwlock);

	if (found) {
		pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
		return 0;
	}

	return -EINVAL;
}

static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	do {
		struct pmb_entry *pmblink = pmbe;

		/*
		 * We may be called before this pmb_entry has been
		 * entered into the PMB table via set_pmb_entry(), but
		 * that's OK because we've allocated a unique slot for
		 * this entry in pmb_alloc() (even if we haven't filled
		 * it yet).
		 *
		 * Therefore, calling __clear_pmb_entry() is safe as no
		 * other mapping can be using that slot.
		 */
		__clear_pmb_entry(pmbe);

		flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

		pmbe = pmblink->link;

		pmb_free(pmblink);
	} while (pmbe && --depth);
}

static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
	unsigned long flags;

	if (unlikely(!pmbe))
		return;

	write_lock_irqsave(&pmb_rwlock, flags);
	__pmb_unmap_entry(pmbe, depth);
	write_unlock_irqrestore(&pmb_rwlock, flags);
}

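/*
 * Log the boot mappings that survived synchronization.
 */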
static void __init pmb_notify(void)
{
	int i;

	pr_info("PMB: boot mappings:\n");

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
			pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
			pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
	}

	read_unlock(&pmb_rwlock);
}

/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
	struct pmb_entry *pmbp = NULL;
	int i, j;

	/*
	 * Run through the initial boot mappings, log the established
	 * ones, and blow away anything that falls outside of the valid
	 * PPN range. Specifically, we only care about existing mappings
	 * that impact the cached/uncached sections.
	 *
	 * Note that touching these can be a bit of a minefield; the boot
	 * loader can establish multi-page mappings with the same caching
	 * attributes, so we need to ensure that we aren't modifying a
	 * mapping that we're presently executing from, or may execute
	 * from in the case of straddling page boundaries.
	 *
	 * In the future we will have to tidy up after the boot loader by
	 * jumping between the cached and uncached mappings and tearing
	 * down alternating mappings while executing from the other.
	 */
	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned long addr_val, data_val;
		unsigned long ppn, vpn, flags;
		unsigned long irqflags;
		unsigned int size;
		struct pmb_entry *pmbe;

		addr = mk_pmb_addr(i);
		data = mk_pmb_data(i);

		addr_val = __raw_readl(addr);
		data_val = __raw_readl(data);

		/*
		 * Skip over any bogus entries
		 */
		if (!(data_val & PMB_V) || !(addr_val & PMB_V))
			continue;

		ppn = data_val & PMB_PFN_MASK;
		vpn = addr_val & PMB_PFN_MASK;

		/*
		 * Only preserve in-range mappings.
		 */
		if (!pmb_ppn_in_range(ppn)) {
			/*
			 * Invalidate anything out of bounds.
			 */
			writel_uncached(addr_val & ~PMB_V, addr);
			writel_uncached(data_val & ~PMB_V, data);
			continue;
		}

		/*
		 * Update the caching attributes if necessary
		 */
		if (data_val & PMB_C) {
			data_val &= ~PMB_CACHE_MASK;
			data_val |= pmb_cache_flags();

			writel_uncached(data_val, data);
		}

		size = data_val & PMB_SZ_MASK;
		flags = size | (data_val & PMB_CACHE_MASK);

		pmbe = pmb_alloc(vpn, ppn, flags, i);
		if (IS_ERR(pmbe)) {
			WARN_ON_ONCE(1);
			continue;
		}

		raw_spin_lock_irqsave(&pmbe->lock, irqflags);

		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
			if (pmb_sizes[j].flag == size)
				pmbe->size = pmb_sizes[j].size;

		if (pmbp) {
			raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
			/*
			 * Compare the previous entry against the current one to
			 * see if the entries span a contiguous mapping. If so,
			 * setup the entry links accordingly. Compound mappings
			 * are later coalesced.
			 */
			if (pmb_can_merge(pmbp, pmbe))
				pmbp->link = pmbe;
			raw_spin_unlock(&pmbp->lock);
		}

		pmbp = pmbe;

		raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
	}
}

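/*
 * Collapse a linked chain of entries into a single larger entry when
 * the combined span matches a valid PMB page size.
 */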
static void __init pmb_merge(struct pmb_entry *head)
{
	unsigned long span, newsize;
	struct pmb_entry *tail;
	int i = 1, depth = 0;

	span = newsize = head->size;

	tail = head->link;
	while (tail) {
		span += tail->size;

		if (pmb_size_valid(span)) {
			newsize = span;
			depth = i;
		}

		/* This is the end of the line.. */
		if (!tail->link)
			break;

		tail = tail->link;
		i++;
	}

	/*
	 * The merged page size must be valid.
	 */
	if (!depth || !pmb_size_valid(newsize))
		return;

	head->flags &= ~PMB_SZ_MASK;
	head->flags |= pmb_size_to_flags(newsize);

	head->size = newsize;

	__pmb_unmap_entry(head->link, depth);
	__set_pmb_entry(head);
}

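/*
 * Walk the entry list and attempt to merge each compound mapping into
 * fewer, larger entries.
 */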
static void __init pmb_coalesce(void)
{
	unsigned long flags;
	int i;

	write_lock_irqsave(&pmb_rwlock, flags);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		/*
		 * We're only interested in compound mappings
		 */
		if (!pmbe->link)
			continue;

		/*
		 * Nothing to do if it already uses the largest possible
		 * page size.
		 */
		if (pmbe->size == SZ_512M)
			continue;

		pmb_merge(pmbe);
	}

	write_unlock_irqrestore(&pmb_rwlock, flags);
}

#ifdef CONFIG_UNCACHED_MAPPING
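/*
 * Shrink an oversized uncached mapping (one established by the boot
 * loader rather than the kernel) down to the 16MB the kernel expects.
 */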
static void __init pmb_resize(void)
{
	int i;

	/*
	 * If the uncached mapping was constructed by the kernel, it will
	 * already be a reasonable size.
	 */
	if (uncached_size == SZ_16M)
		return;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		struct pmb_entry *pmbe;
		unsigned long flags;

		if (!test_bit(i, pmb_map))
			continue;

		pmbe = &pmb_entry_list[i];

		if (pmbe->vpn != uncached_start)
			continue;

		/*
		 * Found it, now resize it.
		 */
		raw_spin_lock_irqsave(&pmbe->lock, flags);

		pmbe->size	= SZ_16M;
		pmbe->flags	&= ~PMB_SZ_MASK;
		pmbe->flags	|= pmb_size_to_flags(pmbe->size);

		uncached_resize(pmbe->size);

		__set_pmb_entry(pmbe);

		raw_spin_unlock_irqrestore(&pmbe->lock, flags);
	}

	read_unlock(&pmb_rwlock);
}
#endif

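/*
 * Handle the early "pmb" command line option; "pmb=iomap" enables
 * PMB-backed remapping via pmb_remap_caller().
 */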
static int __init early_pmb(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "iomap"))
		pmb_iomapping_enabled = 1;

	return 0;
}
early_param("pmb", early_pmb);

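/*
 * Bring up the PMB at boot: import the hardware state, coalesce
 * compound mappings, trim the uncached mapping where configured, log
 * the result and flush the TLB.
 */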
void __init pmb_init(void)
{
	/* Synchronize software state */
	pmb_synchronize();

	/* Attempt to combine compound mappings */
	pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
	/* Resize initial mappings, if necessary */
	pmb_resize();
#endif

	/* Log them */
	pmb_notify();

	writel_uncached(0, PMB_IRMCR);

	/* Flush out the TLB */
	local_flush_tlb_all();
	ctrl_barrier();
}

bool __in_29bit_mode(void)
{
	return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}

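/*
 * Dump the current hardware PMB entries via debugfs.
 */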
static int pmb_debugfs_show(struct seq_file *file, void *iter)
{
	int i;

	seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
			 "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
	seq_printf(file, "ety   vpn  ppn  size   flags\n");

	for (i = 0; i < NR_PMB_ENTRIES; i++) {
		unsigned long addr, data;
		unsigned int size;
		char *sz_str = NULL;

		addr = __raw_readl(mk_pmb_addr(i));
		data = __raw_readl(mk_pmb_data(i));

		size = data & PMB_SZ_MASK;
		sz_str = (size == PMB_SZ_16M)  ? " 16MB":
			 (size == PMB_SZ_64M)  ? " 64MB":
			 (size == PMB_SZ_128M) ? "128MB":
						 "512MB";

		/* 02: V 0x88 0x08 128MB C CB  B */
		seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
			   i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
			   (addr >> 24) & 0xff, (data >> 24) & 0xff,
			   sz_str, (data & PMB_C) ? 'C' : ' ',
			   (data & PMB_WT) ? "WT" : "CB",
			   (data & PMB_UB) ? "UB" : " B");
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(pmb_debugfs);

static int __init pmb_debugfs_init(void)
{
	debugfs_create_file("pmb", S_IFREG | S_IRUGO, arch_debugfs_dir, NULL,
			    &pmb_debugfs_fops);
	return 0;
}
subsys_initcall(pmb_debugfs_init);

#ifdef CONFIG_PM
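/*
 * Rewrite all software-tracked entries back into the hardware PMB on
 * resume from suspend.
 */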
static void pmb_syscore_resume(void)
{
	struct pmb_entry *pmbe;
	int i;

	read_lock(&pmb_rwlock);

	for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
		if (test_bit(i, pmb_map)) {
			pmbe = &pmb_entry_list[i];
			set_pmb_entry(pmbe);
		}
	}

	read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
	.resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
	register_syscore_ops(&pmb_syscore_ops);
	return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif