/*
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "cpu.h"
#include "memory_mapping.h"
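
/*
 * The walk_* helpers below read the guest's page tables directly from
 * guest physical memory and record every present, non-I/O mapping in
 * the MemoryMappingList, merging ranges that are contiguous in both
 * virtual and physical address space.
 */
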
/* PAE Paging or IA-32e Paging */
static void walk_pte(MemoryMappingList *list, target_phys_addr_t pte_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    target_phys_addr_t pte_addr, start_paddr;
    uint64_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pte_addr = (pte_start_addr + i * 8) & a20_mask;
        pte = ldq_phys(pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        /* bit 63 is the XD/NX bit, not part of the physical address */
        start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63);
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }
        start_vaddr = start_line_addr | ((i & 0x1ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}
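
/* 32-bit Paging: 1024 4-byte PTEs, each mapping a 4 KB page */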
static void walk_pte2(MemoryMappingList *list,
                      target_phys_addr_t pte_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    target_phys_addr_t pte_addr, start_paddr;
    uint32_t pte;
    target_ulong start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pte_addr = (pte_start_addr + i * 4) & a20_mask;
        pte = ldl_phys(pte_addr);
        if (!(pte & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        start_paddr = pte & ~0xfff;
        if (cpu_physical_memory_is_io(start_paddr)) {
            /* I/O region */
            continue;
        }
        start_vaddr = start_line_addr | ((i & 0x3ff) << 12);
        memory_mapping_list_add_merge_sorted(list, start_paddr,
                                             start_vaddr, 1 << 12);
    }
}

/* PAE Paging or IA-32e Paging */
static void walk_pde(MemoryMappingList *list, target_phys_addr_t pde_start_addr,
                     int32_t a20_mask, target_ulong start_line_addr)
{
    target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
    uint64_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pde_addr = (pde_start_addr + i * 8) & a20_mask;
        pde = ldq_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ff) << 21);
        if (pde & PG_PSE_MASK) {
            /* 2 MB page */
            start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 21);
            continue;
        }

        pte_start_addr = (pde & ~0xfff) & a20_mask;
        walk_pte(list, pte_start_addr, a20_mask, line_addr);
    }
}
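
/*
 * 32-bit Paging: 1024 4-byte PDEs; when CR4.PSE is set, a PDE with
 * the PS bit maps a 4 MB page (with PSE-36 physical-address bits)
 * instead of pointing to a page table.
 */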
static void walk_pde2(MemoryMappingList *list,
                      target_phys_addr_t pde_start_addr, int32_t a20_mask,
                      bool pse)
{
    target_phys_addr_t pde_addr, pte_start_addr, start_paddr;
    uint32_t pde;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 1024; i++) {
        pde_addr = (pde_start_addr + i * 4) & a20_mask;
        pde = ldl_phys(pde_addr);
        if (!(pde & PG_PRESENT_MASK)) {
            continue;
        }

        line_addr = (((unsigned int)i & 0x3ff) << 22);
        if ((pde & PG_PSE_MASK) && pse) {
            /* 4 MB page: PSE-36 puts paddr bits 39:32 in PDE bits 20:13 */
            start_paddr = (pde & ~0x3fffff)
                | ((target_phys_addr_t)(pde & 0x1fe000) << 19);
            if (cpu_physical_memory_is_io(start_paddr)) {
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 22);
            continue;
        }

        pte_start_addr = (pde & ~0xfff) & a20_mask;
        walk_pte2(list, pte_start_addr, a20_mask, line_addr);
    }
}
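
/*
 * PAE Paging: the page-directory-pointer table loaded from CR3 has
 * only 4 entries, each covering 1 GB of the 32-bit address space.
 */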
static void walk_pdpe2(MemoryMappingList *list,
                       target_phys_addr_t pdpe_start_addr, int32_t a20_mask)
{
    target_phys_addr_t pdpe_addr, pde_start_addr;
    uint64_t pdpe;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 4; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = (((unsigned int)i & 0x3) << 30);
        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
        walk_pde(list, pde_start_addr, a20_mask, line_addr);
    }
}
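
/*
 * IA-32e Paging: 512 8-byte PDPTEs; an entry with the PS bit set maps
 * a 1 GB page directly.
 */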
static void walk_pdpe(MemoryMappingList *list,
                      target_phys_addr_t pdpe_start_addr, int32_t a20_mask,
                      target_ulong start_line_addr)
{
    target_phys_addr_t pdpe_addr, pde_start_addr, start_paddr;
    uint64_t pdpe;
    target_ulong line_addr, start_vaddr;
    int i;

    for (i = 0; i < 512; i++) {
        pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask;
        pdpe = ldq_phys(pdpe_addr);
        if (!(pdpe & PG_PRESENT_MASK)) {
            /* not present */
            continue;
        }

        line_addr = start_line_addr | ((i & 0x1ffULL) << 30);
        if (pdpe & PG_PSE_MASK) {
            /* 1 GB page */
            start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63);
            if (cpu_physical_memory_is_io(start_paddr)) {
                /* I/O region */
                continue;
            }
            start_vaddr = line_addr;
            memory_mapping_list_add_merge_sorted(list, start_paddr,
                                                 start_vaddr, 1 << 30);
            continue;
        }

        pde_start_addr = (pdpe & ~0xfff) & a20_mask;
        walk_pde(list, pde_start_addr, a20_mask, line_addr);
    }
}
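
/*
 * IA-32e Paging: the PML4 table holds 512 8-byte entries; entry i
 * covers a 512 GB slice of the address space, and the resulting
 * virtual address must be sign-extended to canonical form.
 */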
static void walk_pml4e(MemoryMappingList *list,
                       target_phys_addr_t pml4e_start_addr, int32_t a20_mask)
{
    target_phys_addr_t pml4e_addr, pdpe_start_addr;
    uint64_t pml4e;
    target_ulong line_addr;
    int i;

    for (i = 0; i < 512; i++) {
        pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask;
        pml4e = ldq_phys(pml4e_addr);
        if (!(pml4e & PG_PRESENT_MASK)) {
            continue;
        }
        line_addr = (i & 0x1ffULL) << 39;
        if (i & 0x100) { /* entries 256-511 sit in the canonical high half */
            line_addr |= 0xffffULL << 48;
        }
        pdpe_start_addr = (pml4e & ~0xfff) & a20_mask;
        walk_pdpe(list, pdpe_start_addr, a20_mask, line_addr);
    }
}
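
/*
 * Pick the paging mode from the control registers: CR4.PAE selects
 * PAE or IA-32e paging (EFER.LMA, cached in hflags, distinguishes the
 * two), otherwise plain 32-bit paging is used, with 4 MB pages allowed
 * only when CR4.PSE is set.  The PAE page-directory-pointer table is
 * only 32-byte aligned, hence the ~0x1f mask on CR3.
 */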
int cpu_get_memory_mapping(MemoryMappingList *list, CPUArchState *env)
{
    if (!cpu_paging_enabled(env)) {
        /* paging is disabled */
        return 0;
    }

    if (env->cr[4] & CR4_PAE_MASK) {
        if (env->hflags & HF_LMA_MASK) {
            target_phys_addr_t pml4e_addr;

            pml4e_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
            walk_pml4e(list, pml4e_addr, env->a20_mask);
        } else {
            target_phys_addr_t pdpe_addr;

            pdpe_addr = (env->cr[3] & ~0x1f) & env->a20_mask;
            walk_pdpe2(list, pdpe_addr, env->a20_mask);
        }
    } else {
        target_phys_addr_t pde_addr;
        bool pse;

        pde_addr = (env->cr[3] & ~0xfff) & env->a20_mask;
        pse = !!(env->cr[4] & CR4_PSE_MASK);
        walk_pde2(list, pde_addr, env->a20_mask, pse);
    }

    return 0;
}
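
/* Paging is enabled iff CR0.PG is set. */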
bool cpu_paging_enabled(CPUArchState *env)
{
    return env->cr[0] & CR0_PG_MASK;
}