/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010 Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/io_trapped.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include "ioremap.h"

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 */
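
/*
 * Rough sketch of the 29-bit layout assumed below (segment bases as
 * defined in <asm/addrspace.h>): physical memory is aliased into fixed
 * 512MB windows, so a "remap" is pure address arithmetic, roughly:
 *
 *	P1SEGADDR(paddr): (paddr & 0x1fffffff) | 0x80000000  (cached)
 *	P2SEGADDR(paddr): (paddr & 0x1fffffff) | 0xa0000000  (uncached)
 *
 * e.g. a device at physical 0x1f000000 is reachable uncached through
 * the P2 alias 0xbf000000.
 */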
#ifdef CONFIG_29BIT
static void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);

	return NULL;
}
#else
#define __ioremap_29bit(offset, size, prot)	NULL
#endif /* CONFIG_29BIT */
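
/*
 * Worked example of the chain above, assuming the usual segment bases:
 * an uncached request for physical 0x1f000000 comes straight back as
 * the P2 alias 0xbf000000, while the same request with _PAGE_CACHABLE
 * set yields the P1 alias 0x9f000000. Requests using PTEA attributes
 * or landing in P3 return NULL and fall through to the PMB/page-table
 * paths below; offsets at or above P3_ADDR_MAX are handed back as P4
 * aliases. On 32-bit (!CONFIG_29BIT) builds the stub always returns
 * NULL, so everything takes the later paths.
 */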

void __iomem __ref *ioremap_prot(phys_addr_t phys_addr, size_t size,
				 unsigned long prot)
{
	void __iomem *mapped;
	pgprot_t pgprot = __pgprot(prot);

	mapped = __ioremap_trapped(phys_addr, size);
	if (mapped)
		return mapped;

	mapped = __ioremap_29bit(phys_addr, size, pgprot);
	if (mapped)
		return mapped;

	/*
	 * If we can't yet use the regular approach, go the fixmap route.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, size, pgprot);

	/*
	 * First try to remap through the PMB.
	 * PMB entries are all pre-faulted.
	 */
	mapped = pmb_remap_caller(phys_addr, size, pgprot,
				  __builtin_return_address(0));
	if (mapped && !IS_ERR(mapped))
		return mapped;

	return generic_ioremap_prot(phys_addr, size, pgprot);
}
EXPORT_SYMBOL(ioremap_prot);
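
/*
 * Hypothetical usage sketch (the resource and register offset below
 * are made up for illustration): consumers normally go through the
 * generic ioremap() and iounmap() wrappers rather than calling
 * ioremap_prot() directly, e.g.:
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	val = readl(regs + MY_REG_OFFSET);
 *	iounmap(regs);
 */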

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	return 0;
}
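
/*
 * Illustrative only, assuming the usual segment bases: a P1 alias such
 * as 0x9f000000 or a P2 alias such as 0xbf000000 is a fixed identity
 * mapping and reports non-translatable, making the unmap below a no-op
 * for it, while a PMB or page-table backed mapping must actually be
 * torn down.
 */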

void iounmap(volatile void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed((void __iomem *)addr) == 0)
		return;

	/*
	 * If the PMB handled it, there's nothing else to do.
	 */
	if (pmb_unmap((void __iomem *)addr) == 0)
		return;

	generic_iounmap(addr);
}
EXPORT_SYMBOL(iounmap);