# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
#endif
+/* Size of the L2 (and L3, etc) page tables. */
+#define V_L2_BITS 10
+#define V_L2_SIZE (1 << V_L2_BITS)
+
/* The bits remaining after N lower levels of page tables. */
#define V_L1_BITS_REM \
- ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
+ ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS)
#if V_L1_BITS_REM < 4
-#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
+#define V_L1_BITS (V_L1_BITS_REM + V_L2_BITS)
#else
#define V_L1_BITS V_L1_BITS_REM
#endif
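
For concreteness, here is the arithmetic these macros perform under one illustrative configuration (the values are assumptions for the example; L1_MAP_ADDR_SPACE_BITS and TARGET_PAGE_BITS both vary by target and host):

```c
/* Worked example, not part of the patch. Assume:
 *   L1_MAP_ADDR_SPACE_BITS = 64, TARGET_PAGE_BITS = 12
 *
 * Index bits to cover:  64 - 12 = 52
 * V_L1_BITS_REM      =  52 % V_L2_BITS = 52 % 10 = 2
 * Since 2 < 4, the remainder is folded into the top level:
 * V_L1_BITS          =  2 + 10 = 12
 *
 * That leaves (52 - 12) / 10 = 4 levels of V_L2_BITS each below
 * the L1 table; the `< 4` test exists so the top level is never
 * indexed by a uselessly small number of bits. */
```
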
}
#endif
-static void page_init(void)
+void page_size_init(void)
{
/* NOTE: we can always suppose that qemu_host_page_size >=
TARGET_PAGE_SIZE */
#ifdef _WIN32
- {
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- qemu_real_host_page_size = system_info.dwPageSize;
- }
+ SYSTEM_INFO system_info;
+ GetSystemInfo(&system_info);
+ qemu_real_host_page_size = system_info.dwPageSize;
#else
qemu_real_host_page_size = getpagesize();
#endif
qemu_host_page_size = TARGET_PAGE_SIZE;
}
qemu_host_page_mask = ~(qemu_host_page_size - 1);
+}
+
+static void page_init(void)
+{
+ page_size_init();
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
{
#ifdef HAVE_KINFO_GETVMMAP
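
The factored-out page_size_init() ends by computing qemu_host_page_mask, which is only meaningful for power-of-two page sizes. A standalone sketch of that mask arithmetic (hypothetical values; the real code reads the size from the OS as shown above):

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    uintptr_t page_size = 4096;             /* assumed 4 KiB host page */
    uintptr_t page_mask = ~(page_size - 1); /* ~0xfff: clears offset bits */

    uintptr_t addr = 0x12345;
    assert((addr & page_mask) == 0x12000);                   /* round down */
    assert(((addr + page_size - 1) & page_mask) == 0x13000); /* round up */
    return 0;
}
```
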
lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));
/* Level 2..N-1. */
- for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
+ for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
void **p = *lp;
if (p == NULL) {
if (!alloc) {
return NULL;
}
- ALLOC(p, sizeof(void *) * L2_SIZE);
+ ALLOC(p, sizeof(void *) * V_L2_SIZE);
*lp = p;
}
- lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
+ lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
}
pd = *lp;
if (!alloc) {
return NULL;
}
- ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
+ ALLOC(pd, sizeof(PageDesc) * V_L2_SIZE);
*lp = pd;
}
#undef ALLOC
- return pd + (index & (L2_SIZE - 1));
+ return pd + (index & (V_L2_SIZE - 1));
}
static inline PageDesc *page_find(tb_page_addr_t index)
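
The loop in page_find_alloc() is a standard radix-tree descent: each interior level consumes V_L2_BITS of the index, and tables are allocated on demand. A minimal standalone sketch of the same pattern, with hypothetical names and a single interior level rather than QEMU's target-dependent count:

```c
#include <stdlib.h>

#define BITS_PER_LEVEL 10
#define LEVEL_SIZE (1 << BITS_PER_LEVEL)

typedef struct Leaf {
    void *first_tb;
} Leaf;

/* root points at LEVEL_SIZE slots; each slot lazily grows an array of
 * LEVEL_SIZE leaves. Locking and allocation-failure handling omitted. */
static Leaf *leaf_find_alloc(void **root, unsigned long index, int alloc)
{
    void **lp = root + ((index >> BITS_PER_LEVEL) & (LEVEL_SIZE - 1));
    Leaf *leaves = *lp;

    if (leaves == NULL) {
        if (!alloc) {
            return NULL;
        }
        leaves = calloc(LEVEL_SIZE, sizeof(Leaf));
        *lp = leaves;
    }
    return leaves + (index & (LEVEL_SIZE - 1));
}
```
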
if (level == 0) {
PageDesc *pd = *lp;
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < V_L2_SIZE; ++i) {
pd[i].first_tb = NULL;
invalidate_page_bitmap(pd + i);
}
} else {
void **pp = *lp;
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < V_L2_SIZE; ++i) {
page_flush_tb_1(level - 1, pp + i);
}
}
int i;
for (i = 0; i < V_L1_SIZE; i++) {
- page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
+ page_flush_tb_1(V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
}
}
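
Continuing the worked configuration from the macro example, the starting level that page_flush_tb() passes down follows from the same arithmetic (again assumed values, not target-independent facts):

```c
/* With L1_MAP_ADDR_SPACE_BITS = 64, TARGET_PAGE_BITS = 12,
 * V_L1_BITS = 12 and V_L2_BITS = 10:
 *   V_L1_SHIFT = 64 - 12 - 12 = 40
 *   V_L1_SHIFT / V_L2_BITS - 1 = 3
 * so page_flush_tb_1() is entered at level 3 and recurses down to
 * level 0, where the PageDesc arrays themselves are cleared. */
```
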
CPU_FOREACH(cpu) {
CPUArchState *env = cpu->env_ptr;
- memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
+ memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
}
- memset(tcg_ctx.tb_ctx.tb_phys_hash, 0,
- CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
+ memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
page_flush_tb();
tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
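
Both memset() changes apply the same hardening idiom: take sizeof on the array itself instead of multiplying an element count by an element size, so the cleared length can never go stale. A generic sketch of why (hypothetical struct, not QEMU's):

```c
#include <string.h>

#define TABLE_SIZE 256

struct ctx {
    void *table[TABLE_SIZE];
};

static void ctx_clear(struct ctx *c)
{
    /* sizeof(c->table) tracks both the element count and the element
     * type; TABLE_SIZE * sizeof(void *) would silently break if the
     * element type ever changed. */
    memset(c->table, 0, sizeof(c->table));
}
```
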
if (level == 0) {
PageDesc *pd = *lp;
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < V_L2_SIZE; ++i) {
int prot = pd[i].flags;
pa = base | (i << TARGET_PAGE_BITS);
} else {
void **pp = *lp;
- for (i = 0; i < L2_SIZE; ++i) {
+ for (i = 0; i < V_L2_SIZE; ++i) {
pa = base | ((abi_ulong)i <<
- (TARGET_PAGE_BITS + L2_BITS * level));
+ (TARGET_PAGE_BITS + V_L2_BITS * level));
rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
if (rc != 0) {
return rc;
for (i = 0; i < V_L1_SIZE; i++) {
int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
- V_L1_SHIFT / L2_BITS - 1, l1_map + i);
+ V_L1_SHIFT / V_L2_BITS - 1, l1_map + i);
if (rc != 0) {
return rc;
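
The shift in walk_memory_regions_1() rebuilds the guest-virtual address that an index path down the tree corresponds to. A worked instance with the assumed constants used earlier (TARGET_PAGE_BITS = 12, V_L2_BITS = 10):

```c
#include <stdint.h>

#define EX_TARGET_PAGE_BITS 12 /* assumed; varies by target */
#define EX_V_L2_BITS 10

/* Span of guest address space covered by one table entry at a given
 * level: each level above the PageDesc level multiplies the span by
 * 2^EX_V_L2_BITS. */
static uint64_t entry_span(int level)
{
    return UINT64_C(1) << (EX_TARGET_PAGE_BITS + EX_V_L2_BITS * level);
}
/* entry_span(0) == 4 KiB, entry_span(1) == 4 MiB, entry_span(2) == 4 GiB */
```
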