/*
 * i386 memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "sysemu/memory_mapping.h"
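
/*
 * Each walk_* helper below scans one level of the guest page tables,
 * records every present, non-I/O mapping it finds, and recurses into the
 * next level unless the entry maps a large page.
 */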

/* PAE Paging or IA-32e Paging */
static void walk_pte(MemoryMappingList *list, AddressSpace *as,
                     hwaddr pte_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    hwaddr pte_addr, start_paddr;
    uint64_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pte_addr = (pte_start_addr + i * 8) & a20_mask;
        pte = address_space_ldq(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        /* mask out the flag bits and the XD bit (bit 63) */
        start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x1ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}
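
/* 32-bit Paging: 1024 four-byte PTEs per table, each mapping a 4 KiB page */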
static void walk_pte2(MemoryMappingList *list, AddressSpace *as,
                      hwaddr pte_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    hwaddr pte_addr, start_paddr;
    uint32_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pte_addr = (pte_start_addr + i * 4) & a20_mask;
        pte = address_space_ldl(as, pte_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = pte & ~0xfff;
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }

        start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}

/* PAE Paging or IA-32e Paging */
#define PLM4_ADDR_MASK 0xffffffffff000ULL /* selects bits 51:12 */

static void walk_pde(MemoryMappingList *list, AddressSpace *as,
                     hwaddr pde_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    hwaddr pde_addr, pte_start_addr, start_paddr;
    uint64_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pde_addr = (pde_start_addr + i * 8) & a20_mask;
        pde = address_space_ldq(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ff) << 21);
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }

            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 21);
            continue;
        }

        pte_start_addr = (pde & PLM4_ADDR_MASK) & a20_mask;
        walk_pte(list, as, pte_start_addr, a20_mask, line_addr);
    }
}
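
/* 32-bit Paging; when CR4.PSE is set, PG_PSE_MASK entries map 4 MiB pages */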
static void walk_pde2(MemoryMappingList *list, AddressSpace *as,
                      hwaddr pde_start_addr, int32_t a20_mask,
                      bool pse)
{
    hwaddr pde_addr, pte_start_addr, start_paddr, high_paddr;
    uint32_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pde_addr = (pde_start_addr + i * 4) & a20_mask;
        pde = address_space_ldl(as, pde_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3ff) << 22);
        if ((pde & PG_PSE_MASK) && pse) {
            /*
             * 4 MB page:
             * bits 39:32 of the address are bits 20:13 of the PDE,
             * bits 31:22 of the address are bits 31:22 of the PDE.
             */
            high_paddr = ((hwaddr)(pde & 0x1fe000) << 19);
            start_paddr = (pde & ~0x3fffff) | high_paddr;
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }

            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 22);
            continue;
        }

        pte_start_addr = (pde & ~0xfff) & a20_mask;
        walk_pte2(list, as, pte_start_addr, a20_mask, line_addr);
    }
}
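
/* PAE Paging: CR3 points at a four-entry page-directory-pointer table */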
static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as,
                       hwaddr pdpe_start_addr, int32_t a20_mask)
{
    hwaddr pdpe_addr, pde_start_addr;
    uint64_t pdpe;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 4; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3) << 30);
        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
        walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
    }
}

#ifdef TARGET_X86_64
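/* IA-32e Paging: 512-entry PDPT level; a PG_PSE_MASK entry maps a 1 GiB page */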
static void walk_pdpe(MemoryMappingList *list, AddressSpace *as,
                      hwaddr pdpe_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    hwaddr pdpe_addr, pde_start_addr, start_paddr;
    uint64_t pdpe;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = address_space_ldq(as, pdpe_addr, MEMTXATTRS_UNSPECIFIED, NULL);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
        if (pdpe & PG_PSE_MASK) {
            /* 1 GB page */
            start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }

            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 30);
            continue;
        }

        pde_start_addr = (pdpe & PLM4_ADDR_MASK) & a20_mask;
        walk_pde(list, as, pde_start_addr, a20_mask, line_addr);
    }
}
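
/*
 * IA-32e Paging: each of the 512 PML4 entries covers a 512 GiB slice;
 * the walk fills bits 63:48 of the virtual address with ones.
 */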
static void walk_pml4e(MemoryMappingList *list, AddressSpace *as,
                       hwaddr pml4e_start_addr, int32_t a20_mask)
{
    hwaddr pml4e_addr, pdpe_start_addr;
    uint64_t pml4e;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 512; i++) {
        pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
        pml4e = address_space_ldq(as, pml4e_addr, MEMTXATTRS_UNSPECIFIED,
                                  NULL);
        if (!(pml4e & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = ((i & 0x1ffULL) << 39) | (0xffffULL << 48);
        pdpe_start_addr = (pml4e & PLM4_ADDR_MASK) & a20_mask;
        walk_pdpe(list, as, pdpe_start_addr, a20_mask, line_addr);
    }
}
#endif
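
/*
 * Entry point: dispatch on the active paging mode (IA-32e, PAE or plain
 * 32-bit paging) and walk the page tables rooted at CR3.
 */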
void x86_cpu_get_memory_mapping(CPUState *cs, MemoryMappingList *list,
                                Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (!cpu_paging_enabled(cs)) {
        /* paging is disabled */
        return;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            hwaddr pml4e_addr;

            pml4e_addr = (env->cr[3] & PLM4_ADDR_MASK) & env->a20_mask;
            walk_pml4e(list, cs->as, pml4e_addr, env->a20_mask);
        } else
#endif
        {
            hwaddr pdpe_addr;

            pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
            walk_pdpe2(list, cs->as, pdpe_addr, env->a20_mask);
        }
    } else {
        hwaddr pde_addr;
        bool pse;

        pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
        pse = !!(env->cr[4] & CR4_PSE_MASK);
        walk_pde2(list, cs->as, pde_addr, env->a20_mask, pse);
    }
}
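
/*
 * x86_cpu_get_memory_mapping() is installed as the CPUClass
 * get_memory_mapping hook, so generic code (e.g. the dump-guest-memory
 * path in memory_mapping.c) can collect the guest's mappings without
 * knowing about x86 paging.
 */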