/*
 * i386 memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "sysemu/memory_mapping.h"
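
/*
 * The helpers below walk the guest page tables for whichever paging mode is
 * in use and record every present mapping in a sorted, merged
 * MemoryMappingList: 4 KiB pages from the page tables, plus 2 MiB, 4 MiB and
 * 1 GiB pages taken directly from higher-level entries.  Entries that are
 * not present, or whose target resolves to an I/O region, are skipped.
 */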

/* PAE Paging or IA-32e Paging */
static void walk_pte(MemoryMappingList *list, AddressSpace *as,
                     hwaddr pte_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    hwaddr pte_addr, start_paddr;
    uint64_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pte_addr = (pte_start_addr + i * 8) & a20_mask;
        pte = ldq_phys(as, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x1ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}

/* 32-bit Paging */
static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
                      hwaddr pte_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    hwaddr pte_addr, start_paddr;
    uint32_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pte_addr = (pte_start_addr + i * 4) & a20_mask;
        pte = ldl_phys(as, pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = pte & ~0xfff;
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}
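
/*
 * PAE/IA-32e page directories and page tables hold 512 8-byte entries.  The
 * next-level table address always occupies bits 51:12 of an entry, so
 * PLM4_ADDR_MASK below is reused for PML4Es, PDPTEs, PDEs and long-mode CR3.
 * A PDE with PG_PSE_MASK set maps a 2 MiB page directly instead of pointing
 * to a page table.
 */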

/* PAE Paging or IA-32e Paging */
#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */

static void walk_pde(MemoryMappingList *list, AddressSpace *as,
                     hwaddr pde_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    hwaddr pde_addr, pte_start_addr, start_paddr;
    uint64_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pde_addr = (pde_start_addr + i * 8) & a20_mask;
        pde = ldq_phys(as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ff) << 21);
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 21);
            continue;
        }

        pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask;
        walk_pte(list, as, pte_start_addr, a20_mask, line_addr);
    }
}
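
/*
 * With 32-bit paging the directory and table levels hold 1024 4-byte entries
 * each.  4 MB PDE mappings are honoured only when CR4.PSE is enabled (the
 * 'pse' argument); their physical address can exceed 32 bits via the
 * PSE-36-style high bits stored in PDE bits 20:13.
 */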

/* 32-bit Paging */
static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
                      hwaddr pde_start_addr, int32_t a20_mask,
                      bool pse)
{
    hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr;
    uint32_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pde_addr = (pde_start_addr + i * 4) & a20_mask;
        pde = ldl_phys(as, pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3ff) << 22);
        if ((pde & PG_PSE_MASK) && pse) {
            /*
             * 4 MB page:
             * bits 39:32 of the physical address are bits 20:13 of the PDE
             *   (the PDE masked to bits 20:13 and shifted left by 19)
             * bits 31:22 of the physical address are bits 31:22 of the PDE
             */
            high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
            start_paddr = (pde & ~0x3fffff) | high_paddr;
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 22);
            continue;
        }

        pte_start_addr = (pde & ~0xfff) & a20_mask;
        walk_pte2(list, as, pte_start_addr, a20_mask, line_addr);
    }
}
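
/*
 * In PAE mode CR3 points at a 4-entry, 32-byte-aligned page-directory-pointer
 * table, hence the loop bound of 4 here and the ~0x1f mask applied to CR3 by
 * the caller.  PAE PDPTEs never map large pages, so the walk always descends
 * to the page directory.
 */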

/* PAE Paging */
static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,
                       hwaddr pdpe_start_addr, int32_t a20_mask)
{
    hwaddr pdpe_addr, pde_start_addr;
    uint64_t pdpe;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 4; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(as, pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3) << 30);
        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
        walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
    }
}
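
/*
 * The full 4-level IA-32e walk (PML4 -> PDPT -> PD -> PT) is only needed,
 * and only compiled, for 64-bit targets.
 */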

#ifdef TARGET_X86_64
/* IA-32e Paging */
static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
                      hwaddr pdpe_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    hwaddr pdpe_addr, pde_start_addr, start_paddr;
    uint64_t pdpe;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(as, pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
        if (pdpe & PG_PSE_MASK) {
            /* 1 GB page */
            start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 30);
            continue;
        }

        pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask;
        walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
    }
}

/* IA-32e Paging */
static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
                       hwaddr pml4e_start_addr, int32_t a20_mask)
{
    hwaddr pml4e_addr, pdpe_start_addr;
    uint64_t pml4e;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 512; i++) {
        pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
        pml4e = ldq_phys(as, pml4e_addr);
        if (!(pml4e & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
        pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
        walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
    }
}
#endif
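
/*
 * Entry point: pick the paging mode from the control registers and start the
 * corresponding walk at CR3.  CR4.PAE together with long mode active
 * (HF_LMA_MASK in hflags) selects IA-32e paging, CR4.PAE alone selects PAE
 * paging, and otherwise legacy 32-bit paging is used, with CR4.PSE deciding
 * whether 4 MB pages are honoured.  Nothing is added to the list while
 * paging is disabled.
 */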

void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
                                Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (!cpu_paging_enabled(cs)) {
        /* paging is disabled */
        return;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            hwaddr pml4e_addr;

            pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
            walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask);
        } else
#endif
        {
            hwaddr pdpe_addr;

            pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
            walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask);
        }
    } else {
        hwaddr pde_addr;
        bool pse;

        pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
        pse = !!(env->cr[4] & CR4_PSE_MASK);
        walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse);
    }
}