/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/powernv.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;

/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			   unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}
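
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * callers such as the PCI and ISA bridge code pick the virtual address
 * themselves and bolt the mapping there, instead of letting ioremap()
 * choose one. The physical address and size below are invented:
 *
 *	void __iomem *va;
 *
 *	va = __ioremap_at(0x3fe000000000ul, (void *)ISA_IO_BASE, 0x10000,
 *			  pgprot_val(pgprot_noncached(__pgprot(0))));
 *	if (va)
 *		__iounmap_at((void *)ISA_IO_BASE, 0x10000);
 */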

void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
			       unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map addresses going up
	 * from ioremap_bot; the vmalloc path then allocates from
	 * ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}
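
/*
 * Worked example (editor's note, not from the original file): with 4K
 * pages, a hypothetical request for 0x3004 bytes at physical address
 * 0xfe001234 gives
 *
 *	paligned = 0xfe001234 & PAGE_MASK              = 0xfe001000
 *	size     = PAGE_ALIGN(0xfe001234 + 0x3004) - paligned
 *	         = 0xfe005000 - 0xfe001000             = 0x4000
 *
 * and the returned cookie is the mapped base plus the sub-page offset
 * 0x234, so callers need not page-align their requests.
 */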

void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
			unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
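
/*
 * Typical use (editor's illustrative sketch, not part of this file): a
 * driver maps its device's MMIO registers and accesses them through the
 * IO accessors. The physical address, size and register offsets below
 * are invented:
 *
 *	void __iomem *regs = ioremap(0xf8040000, 0x1000);
 *
 *	if (regs) {
 *		u32 status = in_be32(regs + 0x10);
 *		out_be32(regs + 0x14, status | 1);
 *		iounmap(regs);
 *	}
 */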

void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
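
/*
 * Editor's note with an invented example: write-combining mappings suit
 * streaming targets such as frame buffers, where throughput matters more
 * than strictly ordered, uncached access:
 *
 *	void __iomem *fb = ioremap_wc(pci_resource_start(pdev, 0),
 *				      pci_resource_len(pdev, 0));
 */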

void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size,
			   unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
#if defined(CONFIG_PPC_BOOK3S_64)
	flags |= _PAGE_PRIVILEGED;
#else
	flags &= ~_PAGE_USER;
#endif

#ifdef _PAGE_BAP_SR
	/* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format,
	 * which means that we just cleared supervisor access... oops ;-) This
	 * restores it.
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
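
/*
 * Illustrative sketch (editor's example, not from the original file):
 * ioremap_prot() lets the caller supply explicit PTE flags; the
 * sanitizing above then forces a privileged, non-executable mapping.
 * The address and size are invented:
 *
 *	void __iomem *p = ioremap_prot(0xf0000000, 0x1000,
 *				       pgprot_val(PAGE_KERNEL));
 */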

/*
 * Unmap an IO region and remove it from the vmalloc'd list. Access to
 * the IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *pgd_page(pgd_t pgd)
{
	if (pgd_huge(pgd))
		return pte_page(pgd_pte(pgd));
	return virt_to_page(pgd_page_vaddr(pgd));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_huge(pud))
		return pte_page(pud_pte(pud));
	return virt_to_page(pud_page_vaddr(pud));
}

/*
 * For hugepages we have the pfn in the pmd and use PTE_RPN_SHIFT bits
 * for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pte_page(pmd_pte(pmd));
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_PPC_64K_PAGES
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have taken up all the fragments mark PTE page NULL
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}
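
/*
 * Worked example (editor's note): with 64K kernel pages and a 4K
 * PTE_FRAG_SIZE, a page holds PTE_FRAG_NR == 16 fragments. If the cached
 * fragment sits at page_base + 15 * 4K, then ret + PTE_FRAG_SIZE lands
 * exactly on the next 64K boundary, its offset bits (~PAGE_MASK) are all
 * zero, and the cache is reset to NULL so the next allocation takes a
 * fresh page via __alloc_for_cache().
 */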

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (!page)
		return NULL;
	if (!kernel && !pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If we find pgtable_page set, we return
	 * the allocated page with single fragment
	 * count.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_hot_cold_page(page, 0);
	}
}
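
/*
 * Lifetime sketch (editor's note, not part of the original file): every
 * fragment handed out holds one reference on its backing page, taken in
 * bulk by set_page_count(page, PTE_FRAG_NR) in __alloc_for_cache(), so
 * the page is destructed and freed only when its last fragment comes
 * back:
 *
 *	pte_t *pte = pte_fragment_alloc(mm, addr, 0);
 *	...
 *	pte_fragment_free((unsigned long *)pte, 0);
 */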

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}
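
/*
 * Encoding example (editor's note, hypothetical values): page tables are
 * aligned well beyond MAX_PGTABLE_INDEX_SIZE, so the low bits of the
 * table pointer are free to carry the index size. A table at
 * 0xc000000001a3c000 freed with shift 9 yields the cookie
 * 0xc000000001a3c009; __tlb_remove_table() below masks off
 * MAX_PGTABLE_INDEX_SIZE to recover both the pointer and the shift.
 */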

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_BOOK3S_64
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * Update the partition table control register,
	 * 64 K size.
	 */
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}
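
/*
 * Worked example (editor's note, not from the original file): the low
 * bits of PTCR encode the table size as a power of two, offset by 12.
 * With PATB_SIZE_SHIFT of 16 (the 64K case noted above), the value
 * or'ed into ptcr is 4, telling the hardware the partition table spans
 * 1UL << (12 + 4) = 64K bytes.
 */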

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR)
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
	else
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#endif /* CONFIG_PPC_BOOK3S_64 */