/*
 * Privileged Space Mapping Buffer (PMB) Support.
 *
 * Copyright (C) 2005 - 2011  Paul Mundt
 * Copyright (C) 2010  Matt Fleming
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/sizes.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
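/*
 * Overview (summary comment, inferred from the code below): the PMB is a
 * small table of NR_PMB_ENTRIES translation entries, each mapping a
 * fixed-size 16MB/64MB/128MB/512MB page from the P1/P2 kernel segments
 * to physical memory without going through the TLB. Every hardware slot
 * is shadowed by a struct pmb_entry in pmb_entry_list[], with pmb_map
 * tracking which slots are in use.
 */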
struct pmb_entry {
        unsigned long vpn;
        unsigned long ppn;
        unsigned long flags;
        unsigned long size;

        raw_spinlock_t lock;

        /*
         * 0 .. NR_PMB_ENTRIES for specific entry selection, or
         * PMB_NO_ENTRY to search for a free one
         */
        int entry;

        /* Adjacent entry link for contiguous multi-entry mappings */
        struct pmb_entry *link;
};
static struct {
        unsigned long size;
        int flag;
} pmb_sizes[] = {
        { .size = SZ_512M, .flag = PMB_SZ_512M, },
        { .size = SZ_128M, .flag = PMB_SZ_128M, },
        { .size = SZ_64M,  .flag = PMB_SZ_64M,  },
        { .size = SZ_16M,  .flag = PMB_SZ_16M,  },
};
static void pmb_unmap_entry(struct pmb_entry *, int depth);

static DEFINE_RWLOCK(pmb_rwlock);
static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES];
static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES);

static unsigned int pmb_iomapping_enabled;
static __always_inline unsigned long mk_pmb_entry(unsigned int entry)
{
        return (entry & PMB_E_MASK) << PMB_E_SHIFT;
}

static __always_inline unsigned long mk_pmb_addr(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_ADDR;
}

static __always_inline unsigned long mk_pmb_data(unsigned int entry)
{
        return mk_pmb_entry(entry) | PMB_DATA;
}

static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
{
        return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
}
/*
 * Ensure that the PMB entries match our cache configuration.
 *
 * When we are in 32-bit address extended mode, CCR.CB becomes
 * invalid, so care must be taken to manually adjust cacheable
 * translations.
 */
static __always_inline unsigned long pmb_cache_flags(void)
{
        unsigned long flags = 0;

#if defined(CONFIG_CACHE_OFF)
        flags |= PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITETHROUGH)
        flags |= PMB_C | PMB_WT | PMB_UB;
#elif defined(CONFIG_CACHE_WRITEBACK)
        flags |= PMB_C;
#endif

        return flags;
}
/*
 * Convert typical pgprot value to the PMB equivalent
 */
static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot)
{
        unsigned long pmb_flags = 0;
        u64 flags = pgprot_val(prot);

        if (flags & _PAGE_CACHABLE)
                pmb_flags |= PMB_C;
        if (flags & _PAGE_WT)
                pmb_flags |= PMB_WT | PMB_UB;

        return pmb_flags;
}
static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b)
{
        return (b->vpn == (a->vpn + a->size)) &&
               (b->ppn == (a->ppn + a->size)) &&
               (b->flags == a->flags);
}
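/*
 * Illustrative example (hypothetical addresses): a 128MB entry covering
 * 0x88000000 -> 0x08000000 can merge with a following 128MB entry
 * covering 0x90000000 -> 0x10000000, since both the virtual and the
 * physical ranges are contiguous and the flags match.
 */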
static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys,
                               unsigned long size)
{
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe, *iter;
                unsigned long span;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * See if VPN and PPN are bounded by an existing mapping.
                 */
                if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size)))
                        continue;
                if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size)))
                        continue;

                /*
                 * Now see if we're in range of a simple mapping.
                 */
                if (size <= pmbe->size) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }

                span = pmbe->size;

                /*
                 * Finally for sizes that involve compound mappings, walk
                 * the chain and accumulate the span.
                 */
                for (iter = pmbe->link; iter; iter = iter->link)
                        span += iter->size;

                /*
                 * Nothing else to do if the range requirements are met.
                 */
                if (size <= span) {
                        read_unlock(&pmb_rwlock);
                        return true;
                }
        }

        read_unlock(&pmb_rwlock);
        return false;
}
static bool pmb_size_valid(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return true;

        return false;
}
static inline bool pmb_addr_valid(unsigned long addr, unsigned long size)
{
        return (addr >= P1SEG && (addr + size - 1) < P3SEG);
}
static inline bool pmb_prot_valid(pgprot_t prot)
{
        return (pgprot_val(prot) & _PAGE_USER) == 0;
}
static int pmb_size_to_flags(unsigned long size)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (pmb_sizes[i].size == size)
                        return pmb_sizes[i].flag;

        return 0;
}
static int pmb_alloc_entry(void)
{
        int pos;

        pos = find_first_zero_bit(pmb_map, NR_PMB_ENTRIES);
        if (pos >= 0 && pos < NR_PMB_ENTRIES)
                __set_bit(pos, pmb_map);
        else
                pos = -ENOSPC;

        return pos;
}
static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
                                   unsigned long flags, int entry)
{
        struct pmb_entry *pmbe;
        unsigned long irqflags;
        void *ret = NULL;
        int pos;

        write_lock_irqsave(&pmb_rwlock, irqflags);

        if (entry == PMB_NO_ENTRY) {
                pos = pmb_alloc_entry();
                if (unlikely(pos < 0)) {
                        ret = ERR_PTR(pos);
                        goto out;
                }
        } else {
                if (__test_and_set_bit(entry, pmb_map)) {
                        ret = ERR_PTR(-ENOSPC);
                        goto out;
                }

                pos = entry;
        }

        write_unlock_irqrestore(&pmb_rwlock, irqflags);

        pmbe = &pmb_entry_list[pos];

        memset(pmbe, 0, sizeof(struct pmb_entry));

        raw_spin_lock_init(&pmbe->lock);

        pmbe->vpn       = vpn;
        pmbe->ppn       = ppn;
        pmbe->flags     = flags;
        pmbe->entry     = pos;

        return pmbe;

out:
        write_unlock_irqrestore(&pmb_rwlock, irqflags);
        return ret;
}
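/*
 * Note (added commentary): pmb_alloc() only reserves a slot in pmb_map
 * and fills in the software state; the hardware entry is left untouched
 * until __set_pmb_entry() is called by the mapping code with the entry
 * lock held.
 */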
static void pmb_free(struct pmb_entry *pmbe)
{
        __clear_bit(pmbe->entry, pmb_map);

        pmbe->entry     = PMB_NO_ENTRY;
        pmbe->link      = NULL;
}
/*
 * Must be run uncached.
 */
static void __set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        jump_to_uncached();

        /* Set V-bit */
        __raw_writel(pmbe->vpn | PMB_V, addr);
        __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data);

        back_to_cached();
}
static void __clear_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long addr, data;
        unsigned long addr_val, data_val;

        addr = mk_pmb_addr(pmbe->entry);
        data = mk_pmb_data(pmbe->entry);

        addr_val = __raw_readl(addr);
        data_val = __raw_readl(data);

        /* Clear V-bit */
        writel_uncached(addr_val & ~PMB_V, addr);
        writel_uncached(data_val & ~PMB_V, data);
}
#ifdef CONFIG_PM
static void set_pmb_entry(struct pmb_entry *pmbe)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&pmbe->lock, flags);
        __set_pmb_entry(pmbe);
        raw_spin_unlock_irqrestore(&pmbe->lock, flags);
}
#endif /* CONFIG_PM */
int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys,
                     unsigned long size, pgprot_t prot)
{
        struct pmb_entry *pmbp, *pmbe;
        unsigned long orig_addr, orig_size;
        unsigned long flags, pmb_flags;
        int i, mapped;

        if (size < SZ_16M)
                return -EINVAL;
        if (!pmb_addr_valid(vaddr, size))
                return -EFAULT;
        if (pmb_mapping_exists(vaddr, phys, size))
                return 0;

        orig_addr = vaddr;
        orig_size = size;

        flush_tlb_kernel_range(vaddr, vaddr + size);

        pmb_flags = pgprot_to_pmb_flags(prot);
        pmbp = NULL;

        do {
                for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) {
                        if (size < pmb_sizes[i].size)
                                continue;

                        pmbe = pmb_alloc(vaddr, phys, pmb_flags |
                                         pmb_sizes[i].flag, PMB_NO_ENTRY);
                        if (IS_ERR(pmbe)) {
                                pmb_unmap_entry(pmbp, mapped);
                                return PTR_ERR(pmbe);
                        }

                        raw_spin_lock_irqsave(&pmbe->lock, flags);

                        pmbe->size = pmb_sizes[i].size;

                        __set_pmb_entry(pmbe);

                        phys    += pmbe->size;
                        vaddr   += pmbe->size;
                        size    -= pmbe->size;

                        /*
                         * Link adjacent entries that span multiple PMB
                         * entries for easier tear-down.
                         */
                        if (likely(pmbp)) {
                                raw_spin_lock_nested(&pmbp->lock,
                                                     SINGLE_DEPTH_NESTING);
                                pmbp->link = pmbe;
                                raw_spin_unlock(&pmbp->lock);
                        }

                        pmbp = pmbe;

                        /*
                         * Instead of trying smaller sizes on every
                         * iteration (even if we succeed in allocating
                         * space), try using pmb_sizes[i].size again.
                         */
                        i--;
                        mapped++;

                        raw_spin_unlock_irqrestore(&pmbe->lock, flags);
                }
        } while (size >= SZ_16M);

        flush_cache_vmap(orig_addr, orig_addr + orig_size);

        return 0;
}
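/*
 * Usage sketch (hypothetical addresses, for illustration only):
 *
 *      if (pmb_bolt_mapping(0xa0000000, 0x48000000, SZ_128M, PAGE_KERNEL))
 *              ... handle failure ...
 *
 * The size must be at least SZ_16M, the virtual range must lie within
 * P1SEG..P3SEG, and requests already covered by an existing (possibly
 * compound) mapping succeed without allocating new entries.
 */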
void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size,
                               pgprot_t prot, void *caller)
{
        unsigned long vaddr;
        phys_addr_t offset, last_addr;
        phys_addr_t align_mask;
        unsigned long aligned;
        struct vm_struct *area;
        int i, ret;

        if (!pmb_iomapping_enabled)
                return NULL;

        /*
         * Small mappings need to go through the TLB.
         */
        if (size < SZ_16M)
                return ERR_PTR(-EINVAL);
        if (!pmb_prot_valid(prot))
                return ERR_PTR(-EINVAL);

        for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++)
                if (size >= pmb_sizes[i].size)
                        break;

        last_addr = phys + size;
        align_mask = ~(pmb_sizes[i].size - 1);
        offset = phys & ~align_mask;
        phys &= align_mask;
        aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys;

        /*
         * XXX: This should really start from uncached_end, but this
         * causes the MMU to reset, so for now we restrict it to the
         * 0xb000...0xc000 range.
         */
        area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000,
                                    P3SEG, caller);
        if (!area)
                return NULL;

        area->phys_addr = phys;
        vaddr = (unsigned long)area->addr;

        ret = pmb_bolt_mapping(vaddr, phys, size, prot);
        if (unlikely(ret != 0))
                return ERR_PTR(ret);

        return (void __iomem *)(offset + (char *)vaddr);
}
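/*
 * Note (added commentary): this path is only taken when PMB-backed
 * iomappings have been enabled on the kernel command line (see
 * early_pmb() below). A hypothetical ioremap(0x18000000, SZ_64M) of a
 * large device window could then be satisfied from PMB entries; when
 * disabled, NULL is returned and the generic page-table based ioremap
 * path is used instead.
 */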
int pmb_unmap(void __iomem *addr)
{
        struct pmb_entry *pmbe = NULL;
        unsigned long vaddr = (unsigned long __force)addr;
        int i, found = 0;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        if (pmbe->vpn == vaddr) {
                                found = 1;
                                break;
                        }
                }
        }

        read_unlock(&pmb_rwlock);

        if (found) {
                pmb_unmap_entry(pmbe, NR_PMB_ENTRIES);
                return 0;
        }

        return -EINVAL;
}
static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        do {
                struct pmb_entry *pmblink = pmbe;

                /*
                 * We may be called before this pmb_entry has been
                 * entered into the PMB table via set_pmb_entry(), but
                 * that's OK because we've allocated a unique slot for
                 * this entry in pmb_alloc() (even if we haven't filled
                 * it yet).
                 *
                 * Therefore, calling __clear_pmb_entry() is safe as no
                 * other mapping can be using that slot.
                 */
                __clear_pmb_entry(pmbe);

                flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size);

                pmbe = pmblink->link;

                pmb_free(pmblink);
        } while (pmbe && --depth);
}
static void pmb_unmap_entry(struct pmb_entry *pmbe, int depth)
{
        unsigned long flags;

        if (unlikely(!pmbe))
                return;

        write_lock_irqsave(&pmb_rwlock, flags);
        __pmb_unmap_entry(pmbe, depth);
        write_unlock_irqrestore(&pmb_rwlock, flags);
}
static void __init pmb_notify(void)
{
        int i;

        pr_info("PMB: boot mappings:\n");

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                pr_info("       0x%08lx -> 0x%08lx [ %4ldMB %2scached ]\n",
                        pmbe->vpn >> PAGE_SHIFT, pmbe->ppn >> PAGE_SHIFT,
                        pmbe->size >> 20, (pmbe->flags & PMB_C) ? "" : "un");
        }

        read_unlock(&pmb_rwlock);
}
/*
 * Sync our software copy of the PMB mappings with those in hardware. The
 * mappings in the hardware PMB were either set up by the bootloader or
 * very early on by the kernel.
 */
static void __init pmb_synchronize(void)
{
        struct pmb_entry *pmbp = NULL;
        int i, j;

        /*
         * Run through the initial boot mappings, log the established
         * ones, and blow away anything that falls outside of the valid
         * PPN range. Specifically, we only care about existing mappings
         * that impact the cached/uncached sections.
         *
         * Note that touching these can be a bit of a minefield; the boot
         * loader can establish multi-page mappings with the same caching
         * attributes, so we need to ensure that we aren't modifying a
         * mapping that we're presently executing from, or may execute
         * from in the case of straddling page boundaries.
         *
         * In the future we will have to tidy up after the boot loader by
         * jumping between the cached and uncached mappings and tearing
         * down alternating mappings while executing from the other.
         */
        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned long addr_val, data_val;
                unsigned long ppn, vpn, flags;
                unsigned long irqflags;
                unsigned int size;
                struct pmb_entry *pmbe;

                addr = mk_pmb_addr(i);
                data = mk_pmb_data(i);

                addr_val = __raw_readl(addr);
                data_val = __raw_readl(data);

                /*
                 * Skip over any bogus entries
                 */
                if (!(data_val & PMB_V) || !(addr_val & PMB_V))
                        continue;

                ppn = data_val & PMB_PFN_MASK;
                vpn = addr_val & PMB_PFN_MASK;

                /*
                 * Only preserve in-range mappings.
                 */
                if (!pmb_ppn_in_range(ppn)) {
                        /*
                         * Invalidate anything out of bounds.
                         */
                        writel_uncached(addr_val & ~PMB_V, addr);
                        writel_uncached(data_val & ~PMB_V, data);
                        continue;
                }

                /*
                 * Update the caching attributes if necessary
                 */
                if (data_val & PMB_C) {
                        data_val &= ~PMB_CACHE_MASK;
                        data_val |= pmb_cache_flags();

                        writel_uncached(data_val, data);
                }

                size = data_val & PMB_SZ_MASK;
                flags = size | (data_val & PMB_CACHE_MASK);

                pmbe = pmb_alloc(vpn, ppn, flags, i);
                if (IS_ERR(pmbe)) {
                        WARN_ON_ONCE(1);
                        continue;
                }

                raw_spin_lock_irqsave(&pmbe->lock, irqflags);

                for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
                        if (pmb_sizes[j].flag == size)
                                pmbe->size = pmb_sizes[j].size;

                if (pmbp) {
                        raw_spin_lock_nested(&pmbp->lock, SINGLE_DEPTH_NESTING);
                        /*
                         * Compare the previous entry against the current one to
                         * see if the entries span a contiguous mapping. If so,
                         * setup the entry links accordingly. Compound mappings
                         * are later coalesced.
                         */
                        if (pmb_can_merge(pmbp, pmbe))
                                pmbp->link = pmbe;
                        raw_spin_unlock(&pmbp->lock);
                }

                pmbp = pmbe;

                raw_spin_unlock_irqrestore(&pmbe->lock, irqflags);
        }
}
static void __init pmb_merge(struct pmb_entry *head)
{
        unsigned long span, newsize;
        struct pmb_entry *tail;
        int i = 1, depth = 0;

        span = newsize = head->size;

        tail = head->link;
        while (tail) {
                span += tail->size;

                if (pmb_size_valid(span)) {
                        newsize = span;
                        depth = i;
                }

                /* This is the end of the line.. */
                if (!tail->link)
                        break;

                tail = tail->link;
                i++;
        }

        /*
         * The merged page size must be valid.
         */
        if (!depth || !pmb_size_valid(newsize))
                return;

        head->flags &= ~PMB_SZ_MASK;
        head->flags |= pmb_size_to_flags(newsize);

        head->size = newsize;

        __pmb_unmap_entry(head->link, depth);
        __set_pmb_entry(head);
}
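/*
 * Worked example (hypothetical layout): a compound mapping built from
 * four linked 128MB entries spans 512MB, which is itself a valid PMB
 * page size. pmb_merge() then widens the head entry to 512MB and tears
 * down the three linked entries, freeing their hardware slots.
 */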
static void __init pmb_coalesce(void)
{
        unsigned long flags;
        int i;

        write_lock_irqsave(&pmb_rwlock, flags);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                /*
                 * We're only interested in compound mappings
                 */
                if (!pmbe->link)
                        continue;

                /*
                 * Nothing to do if it already uses the largest possible
                 * page size.
                 */
                if (pmbe->size == SZ_512M)
                        continue;

                pmb_merge(pmbe);
        }

        write_unlock_irqrestore(&pmb_rwlock, flags);
}
#ifdef CONFIG_UNCACHED_MAPPING
static void __init pmb_resize(void)
{
        int i;

        /*
         * If the uncached mapping was constructed by the kernel, it will
         * already be a reasonable size.
         */
        if (uncached_size == SZ_16M)
                return;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                struct pmb_entry *pmbe;
                unsigned long flags;

                if (!test_bit(i, pmb_map))
                        continue;

                pmbe = &pmb_entry_list[i];

                if (pmbe->vpn != uncached_start)
                        continue;

                /*
                 * Found it, now resize it.
                 */
                raw_spin_lock_irqsave(&pmbe->lock, flags);

                pmbe->size = SZ_16M;
                pmbe->flags &= ~PMB_SZ_MASK;
                pmbe->flags |= pmb_size_to_flags(pmbe->size);

                uncached_resize(pmbe->size);

                __set_pmb_entry(pmbe);

                raw_spin_unlock_irqrestore(&pmbe->lock, flags);
        }

        read_unlock(&pmb_rwlock);
}
#endif
static int __init early_pmb(char *p)
{
        if (!p)
                return 0;

        if (strstr(p, "iomap"))
                pmb_iomapping_enabled = 1;

        return 0;
}
early_param("pmb", early_pmb);
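/*
 * Usage note (added commentary): booting with "pmb=iomap" on the kernel
 * command line sets pmb_iomapping_enabled, which lets large ioremap()
 * requests be satisfied from PMB entries via pmb_remap_caller() above.
 */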
void __init pmb_init(void)
{
        /* Synchronize software state */
        pmb_synchronize();

        /* Attempt to combine compound mappings */
        pmb_coalesce();

#ifdef CONFIG_UNCACHED_MAPPING
        /* Resize initial mappings, if necessary */
        pmb_resize();
#endif

        /* Log them */
        pmb_notify();

        writel_uncached(0, PMB_IRMCR);

        /* Flush out the TLB */
        local_flush_tlb_all();
        ctrl_barrier();
}
bool __in_29bit_mode(void)
{
        return (__raw_readl(PMB_PASCR) & PASCR_SE) == 0;
}
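/*
 * Note (added commentary): PASCR.SE is the space-extension enable bit;
 * when it is clear the CPU is running with the legacy 29-bit physical
 * address space and the PMB is not being used for address extension.
 */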
static int pmb_seq_show(struct seq_file *file, void *iter)
{
        int i;

        seq_printf(file, "V: Valid, C: Cacheable, WT: Write-Through\n"
                         "CB: Copy-Back, B: Buffered, UB: Unbuffered\n");
        seq_printf(file, "ety   vpn  ppn  size   flags\n");

        for (i = 0; i < NR_PMB_ENTRIES; i++) {
                unsigned long addr, data;
                unsigned int size;
                char *sz_str = NULL;

                addr = __raw_readl(mk_pmb_addr(i));
                data = __raw_readl(mk_pmb_data(i));

                size = data & PMB_SZ_MASK;
                sz_str = (size == PMB_SZ_16M)  ? " 16MB":
                         (size == PMB_SZ_64M)  ? " 64MB":
                         (size == PMB_SZ_128M) ? "128MB":
                                                 "512MB";

                /* 02: V 0x88 0x08 128MB C CB  B */
                seq_printf(file, "%02d: %c 0x%02lx 0x%02lx %s %c %s %s\n",
                           i, ((addr & PMB_V) && (data & PMB_V)) ? 'V' : ' ',
                           (addr >> 24) & 0xff, (data >> 24) & 0xff,
                           sz_str, (data & PMB_C) ? 'C' : ' ',
                           (data & PMB_WT) ? "WT" : "CB",
                           (data & PMB_UB) ? "UB" : " B");
        }

        return 0;
}
static int pmb_debugfs_open(struct inode *inode, struct file *file)
{
        return single_open(file, pmb_seq_show, NULL);
}
static const struct file_operations pmb_debugfs_fops = {
        .owner          = THIS_MODULE,
        .open           = pmb_debugfs_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
static int __init pmb_debugfs_init(void)
{
        struct dentry *dentry;

        dentry = debugfs_create_file("pmb", S_IFREG | S_IRUGO,
                                     arch_debugfs_dir, NULL, &pmb_debugfs_fops);
        if (!dentry)
                return -ENOMEM;

        return 0;
}
subsys_initcall(pmb_debugfs_init);
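/*
 * Note (added commentary): this exposes the raw hardware PMB state under
 * the arch debugfs directory (typically /sys/kernel/debug/sh/pmb), one
 * line per entry in the format shown in pmb_seq_show() above.
 */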
#ifdef CONFIG_PM
static void pmb_syscore_resume(void)
{
        struct pmb_entry *pmbe;
        int i;

        read_lock(&pmb_rwlock);

        for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) {
                if (test_bit(i, pmb_map)) {
                        pmbe = &pmb_entry_list[i];
                        set_pmb_entry(pmbe);
                }
        }

        read_unlock(&pmb_rwlock);
}

static struct syscore_ops pmb_syscore_ops = {
        .resume = pmb_syscore_resume,
};

static int __init pmb_sysdev_init(void)
{
        register_syscore_ops(&pmb_syscore_ops);
        return 0;
}
subsys_initcall(pmb_sysdev_init);
#endif