/*
 * linux/arch/m32r/mm/ioremap.c
 *
 * Copyright (c) 2001, 2002 Hiroyuki Kondo
 *
 * Taken from mips version.
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2001 Ralf Baechle
 */

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

#include <linux/module.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <linux/vmalloc.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

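/*
 * Fill one last-level page table: install a PTE for every page in
 * [address, address + size) within the current PMD, mapping it onto the
 * physical frames that start at phys_addr.  The range is clamped to a
 * single PMD and it is a BUG() to hit an already-present PTE.
 */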
static inline void
remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
				   | _PAGE_WRITE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

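/*
 * Fill the PMD entries covering [address, address + size) within one
 * PGD: allocate a kernel page table for each PMD as needed and let
 * remap_area_pte() populate the per-PMD chunk.
 */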
static inline int
remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

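/*
 * Top-level walk used by __ioremap(): for every PGD entry spanned by
 * the virtual range, allocate the intermediate tables, map the range
 * onto phys_addr with the requested protection flags, and finish with
 * a TLB flush.
 */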
static int
remap_area_pages(unsigned long address, unsigned long phys_addr,
	unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pmd_t *pmd;
		pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */

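/*
 * As a worked example (hypothetical device addresses, 4 KB pages): a
 * request for phys_addr = 0x24001234, size = 0x100 lies above the low
 * 512 MB, so it takes the page-table path and is aligned internally as
 *
 *	offset    = 0x24001234 & ~PAGE_MASK                 = 0x234
 *	phys_addr = 0x24001234 & PAGE_MASK                  = 0x24001000
 *	size      = PAGE_ALIGN(0x24001333 + 1) - 0x24001000 = 0x1000
 *
 * i.e. one page is mapped and the caller gets the mapped base plus
 * 0x234 back, as if the unaligned address had been mapped directly.
 */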
#define IS_LOW512(addr) (!((unsigned long)(addr) & ~0x1fffffffUL))

void __iomem *
__ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Map objects in the low 512mb of address space using KSEG1, otherwise
	 * map using page tables.
	 */
	if (IS_LOW512(phys_addr) && IS_LOW512(phys_addr + size - 1))
		return (void *) KSEG1ADDR(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
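/*
 * Seen from the caller, the two cases above behave like this (addresses
 * are hypothetical; KSEG1 being the kernel's directly mapped alias of
 * the low 512 MB, see <asm/addrspace.h>):
 *
 *	__ioremap(0x00f00000, 0x1000, 0)
 *		lies entirely below 512 MB: no page tables are touched,
 *		the KSEG1 alias of 0x00f00000 is returned immediately.
 *
 *	__ioremap(0x24001234, 0x100, 0)
 *		lies above 512 MB: a VM_IOREMAP area is taken from the
 *		vmalloc range, remap_area_pages() builds the page tables,
 *		and the returned pointer carries the 0x234 offset.
 */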

#define IS_KSEG1(addr) (((unsigned long)(addr) & ~0x1fffffffUL) == KSEG1)

void iounmap(volatile void __iomem *addr)
{
	if (!IS_KSEG1(addr))
		vfree((void *) (PAGE_MASK & (unsigned long) addr));
}
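/*
 * Illustrative use from a driver (the register base and offsets are
 * made up, and the ioremap()/iounmap() and readl()/writel() helpers are
 * assumed to come from <asm/io.h>):
 *
 *	void __iomem *regs = ioremap(0x24000000, 0x1000);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	status = readl(regs + 0x14);
 *	iounmap(regs);
 *
 * iounmap() only needs to vfree() page-table-backed mappings; KSEG1
 * aliases returned by the low-512 MB fast path have nothing to tear
 * down, which is exactly what the IS_KSEG1() test above distinguishes.
 */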