/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>

#include "cpu.h"
#include "exec-all.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

/* threshold to flush the translated code buffer */
#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - CODE_GEN_MAX_SIZE)

#define SMC_BITMAP_USE_THRESHOLD 10

#define MMAP_AREA_START 0x00000000
#define MMAP_AREA_END   0xa8000000

#if defined(TARGET_SPARC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 41
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#else
/* Note: for compatibility with kqemu, we use 32 bits for x86_64 */
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif

TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
uint8_t *code_gen_ptr;

int phys_ram_size;
int phys_ram_fd;
uint8_t *phys_ram_base;
uint8_t *phys_ram_dirty;

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
CPUState *cpu_single_env;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low 12 bits */
    uint32_t phys_offset;
} PhysPageDesc;

#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)

#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)

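/* Note: the l1_map/l1_phys_map tables declared below are two-level: an
   address is split into an L1 index (top L1_BITS), an L2 index (next
   L2_BITS) and the offset within the page (TARGET_PAGE_BITS). With 4 KB
   target pages this is a 10/10/12 split: 1024 L1 entries, each pointing
   to a lazily allocated array of 1024 descriptors. */
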
static void io_mem_init(void);

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_bits;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
PhysPageDesc **l1_phys_map;

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static int io_mem_nb;

/* log support */
char *logfilename = "/tmp/qemu.log";
FILE *logfile;
int loglevel;

/* statistics */
static int tlb_flush_count;
static int tb_flush_count;
static int tb_phys_invalidate_count;

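/* Note: the host page size may be larger than the target page size (e.g.
   a 64 KB host page holding sixteen 4 KB target pages), so memory
   protection changes must be done at host page granularity. page_init()
   also makes the static code generation buffer executable on hosts where
   data pages are not executable by default. */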
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;
        DWORD old_protect;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;

        VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
                       PAGE_EXECUTE_READWRITE, &old_protect);
    }
#else
    qemu_real_host_page_size = getpagesize();
    {
        unsigned long start, end;

        start = (unsigned long)code_gen_buffer;
        start &= ~(qemu_real_host_page_size - 1);

        end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
        end += qemu_real_host_page_size - 1;
        end &= ~(qemu_real_host_page_size - 1);

        mprotect((void *)start, end - start,
                 PROT_READ | PROT_WRITE | PROT_EXEC);
    }
#endif

    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);
    l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
    memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
}

static inline PageDesc *page_find_alloc(unsigned int index)
{
    PageDesc **lp, *p;

    lp = &l1_map[index >> L2_BITS];
    p = *lp;
    if (!p) {
        /* allocate if not found */
        p = qemu_malloc(sizeof(PageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PageDesc) * L2_SIZE);
        *lp = p;
    }
    return p + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(unsigned int index)
{
    PageDesc *p;

    p = l1_map[index >> L2_BITS];
    if (!p)
        return 0;
    return p + (index & (L2_SIZE - 1));
}

static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    void **lp, **p;

    p = (void **)l1_phys_map;
#if TARGET_PHYS_ADDR_SPACE_BITS > 32

#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
#endif
    lp = p + ((index >> (L1_BITS + L2_BITS)) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(void *) * L1_SIZE);
        memset(p, 0, sizeof(void *) * L1_SIZE);
        *lp = p;
    }
#endif
    lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
    p = *lp;
    if (!p) {
        /* allocate if not found */
        if (!alloc)
            return NULL;
        p = qemu_vmalloc(sizeof(PhysPageDesc) * L2_SIZE);
        memset(p, 0, sizeof(PhysPageDesc) * L2_SIZE);
        *lp = p;
    }
    return ((PhysPageDesc *)p) + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#endif

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

    if (!code_gen_ptr) {
        code_gen_ptr = code_gen_buffer;
        page_init();
        io_mem_init();
    }
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = (CPUState **)&(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    *penv = env;
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        qemu_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* set to NULL all the 'first_tb' fields in all PageDescs */
static void page_flush_tb(void)
{
    int i, j;
    PageDesc *p;

    for(i = 0; i < L1_SIZE; i++) {
        p = l1_map[i];
        if (p) {
            for(j = 0; j < L2_SIZE; j++) {
                p->first_tb = NULL;
                invalidate_page_bitmap(p);
                p++;
            }
        }
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%d nb_tbs=%d avg_tb_size=%d\n",
           code_gen_ptr - code_gen_buffer,
           nb_tbs,
           nb_tbs > 0 ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0);
#endif
    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(unsigned long address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=%08lx PC=%08lx size=%04x\n",
                       address, tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0; i < CODE_GEN_HASH_SIZE; i++) {
        for(tb = tb_hash[i]; tb != NULL; tb = tb->hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

void tb_jmp_check(TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (n1 == 2)
            break;
        tb1 = tb1->jmp_next[n1];
    }
    /* check end of list */
    if (tb1 != tb) {
        printf("ERROR: jmp_list from 0x%08lx\n", (long)tb);
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

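/* Note: a TB can span at most two target pages, so the per-page TB lists
   tag each link with the page slot it belongs to: the low 2 bits of the
   pointer encode n (0 for page_addr[0], 1 for page_addr[1]), hence the
   recurring '(long)tb & 3' / '& ~3' masking below. */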
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

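/* Note: the TBs jumping into a given TB form a circular singly linked
   list rooted at jmp_first, using the same low-bit tagging; the special
   tag value 2 marks the list head (the TB itself) and is how the
   traversals below detect the end of the list. */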
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

static inline void tb_phys_invalidate(TranslationBlock *tb, unsigned int page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    target_ulong phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

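/* Set bits [start, start + len) in the bitmap 'tab' (one bit per byte of
   the page). For example, set_bits(tab, 10, 4) sets bits 2..5 of tab[1].
   Used to record which bytes of a page are covered by translated code. */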
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = qemu_malloc(TARGET_PAGE_SIZE / 8);
    if (!p->code_bitmap)
        return;
    memset(p->code_bitmap, 0, TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

#ifdef TARGET_HAS_PRECISE_SMC

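/* Note: for precise self-modifying code support, when a write modifies
   the TB that is currently executing, a new TB containing just the
   faulting instruction is generated with CF_SINGLE_INSN so that execution
   can be resumed safely after the write (see the callers below). */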
static void tb_gen_code(CPUState *env,
                        target_ulong pc, target_ulong cs_base, int flags,
                        int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    target_ulong phys_pc, phys_page2, virt_page2;
    int code_gen_size;

    phys_pc = get_phys_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, CODE_GEN_MAX_SIZE, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_phys_addr_code(env, virt_page2);
    }
    tb_link_phys(tb, phys_pc, phys_page2);
}
#endif

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end). NOTE: start and end must refer to
   the same physical page. 'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(target_ulong start, target_ulong end,
                                   int is_cpu_write_access)
{
    int n, current_tb_modified, current_tb_not_found, current_flags;
    CPUState *env = cpu_single_env;
    PageDesc *p;
    TranslationBlock *tb, *tb_next, *current_tb, *saved_tb;
    target_ulong tb_start, tb_end;
    target_ulong current_pc, current_cs_base;

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    current_tb_not_found = is_cpu_write_access;
    current_tb_modified = 0;
    current_tb = NULL; /* avoid warning */
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_write_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_write_pc);
                }
            }
            if (current_tb == tb &&
                !(current_tb->cflags & CF_SINGLE_INSN)) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env,
                                  env->mem_write_pc, NULL);
#if defined(TARGET_I386)
                current_flags = env->hflags;
                current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
                current_cs_base = (target_ulong)env->segs[R_CS].base;
                current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_write_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(target_ulong start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        if (loglevel) {
            fprintf(logfile, "modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                    cpu_single_env->mem_write_vaddr, len,
                    cpu_single_env->eip,
                    cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
        }
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(target_ulong addr,
                                    unsigned long pc, void *puc)
{
    int n, current_flags, current_tb_modified;
    target_ulong current_pc, current_cs_base;
    PageDesc *p;
    TranslationBlock *tb, *current_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    CPUState *env = cpu_single_env;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
    current_tb_modified = 0;
    current_tb = NULL;
    current_pc = 0; /* avoid warning */
    current_cs_base = 0; /* avoid warning */
    current_flags = 0; /* avoid warning */
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            !(current_tb->cflags & CF_SINGLE_INSN)) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc, puc);
#if defined(TARGET_I386)
            current_flags = env->hflags;
            current_flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
            current_cs_base = (target_ulong)env->segs[R_CS].base;
            current_pc = current_cs_base + env->eip;
#else
#error unsupported CPU
#endif
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags,
                    CF_SINGLE_INSN);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, unsigned int page_addr)
{
    PageDesc *p;
    TranslationBlock *last_first_tb;

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS);
    tb->page_next[n] = p->first_tb;
    last_first_tb = p->first_tb;
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        unsigned long host_start, host_end, addr;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        host_start = page_addr & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;
        prot = 0;
        for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE)
            prot |= page_get_flags(addr);
        mprotect((void *)host_start, qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x%08lx\n",
               host_start);
#endif
        p->flags &= ~PAGE_WRITE;
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!last_first_tb) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
        (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

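/* Note: TBs are hashed by physical address (tb_phys_hash), not virtual
   address, so several virtual mappings of the same physical code page
   share one set of translations, and a write to the physical page can
   invalidate all of them at once. */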
/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_phys(TranslationBlock *tb,
                  target_ulong phys_pc, target_ulong phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;
#ifdef USE_CODE_COPY
    tb->cflags &= ~CF_FP_USED;
    if (tb->cflags & CF_TB_FP_USED)
        tb->cflags |= CF_FP_USED;
#endif

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
}

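/* Note: tbs[] entries are allocated in order and their code is emitted
   linearly into code_gen_buffer, so the array is sorted by tc_ptr; this
   is what makes the binary search below valid. */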
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(unsigned long tc_ptr)
{
    int m_min, m_max, m;
    unsigned long v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (unsigned long)code_gen_buffer ||
        tc_ptr >= (unsigned long)code_gen_ptr)
        return NULL;
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (unsigned long)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_ulong phys_addr;

    phys_addr = cpu_get_phys_page_debug(env, pc);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + 1, 0);
}
#endif

/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
   breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;

    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            return 0;
    }

    if (env->nb_breakpoints >= MAX_BREAKPOINTS)
        return -1;
    env->breakpoints[env->nb_breakpoints++] = pc;

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
#if defined(TARGET_HAS_ICE)
    int i;
    for(i = 0; i < env->nb_breakpoints; i++) {
        if (env->breakpoints[i] == pc)
            goto found;
    }
    return -1;
 found:
    env->nb_breakpoints--;
    if (i < env->nb_breakpoints)
        env->breakpoints[i] = env->breakpoints[env->nb_breakpoints];

    breakpoint_invalidate(env, pc);
    return 0;
#else
    return -1;
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        /* must flush all the translated code to avoid inconsistencies */
        /* XXX: only flush what is necessary */
        tb_flush(env);
    }
#endif
}

/* enable or disable low level logs */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static uint8_t logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#else
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
}

/* mask must never be zero, except for A20 change call */
void cpu_interrupt(CPUState *env, int mask)
{
    TranslationBlock *tb;
    static int interrupt_lock;

    env->interrupt_request |= mask;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    tb = env->current_tb;
    if (tb && !testandset(&interrupt_lock)) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
        interrupt_lock = 0;
    }
}

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB (only usable if 'in_asm' used)" },
#ifdef TARGET_I386
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops after optimization for each compiled TB" },
#endif
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL },
};

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* takes a comma separated list of log masks. Return 0 if error. */
int cpu_str_to_log_mask(const char *str)
{
    CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for(item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for(item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    va_end(ap);
    abort();
}

#if !defined(CONFIG_USER_ONLY)

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        env->tlb_read[0][i].address = -1;
        env->tlb_write[0][i].address = -1;
        env->tlb_read[1][i].address = -1;
        env->tlb_write[1][i].address = -1;
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));

#if !defined(CONFIG_SOFTMMU)
    munmap((void *)MMAP_AREA_START, MMAP_AREA_END - MMAP_AREA_START);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush(env, flush_global);
    }
#endif
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->address &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)))
        tlb_entry->address = -1;
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    TranslationBlock *tb;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_flush_entry(&env->tlb_read[0][i], addr);
    tlb_flush_entry(&env->tlb_write[0][i], addr);
    tlb_flush_entry(&env->tlb_read[1][i], addr);
    tlb_flush_entry(&env->tlb_write[1][i], addr);

    for(i = 0; i < TB_JMP_CACHE_SIZE; i++) {
        tb = env->tb_jmp_cache[i];
        if (tb &&
            ((tb->pc & TARGET_PAGE_MASK) == addr ||
             ((tb->pc + tb->size - 1) & TARGET_PAGE_MASK) == addr)) {
            env->tb_jmp_cache[i] = NULL;
        }
    }

#if !defined(CONFIG_SOFTMMU)
    if (addr < MMAP_AREA_END)
        munmap((void *)addr, TARGET_PAGE_SIZE);
#endif
#ifdef USE_KQEMU
    if (env->kqemu_enabled) {
        kqemu_flush_page(env, addr);
    }
#endif
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_NOTDIRTY;
        }
    }
}

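/* Note: phys_ram_dirty keeps one byte per target page, with each bit a
   separate dirty flag (e.g. CODE_DIRTY_FLAG); 0xff means dirty for every
   client. Clearing flags re-arms the IO_MEM_NOTDIRTY slow write path in
   the TLBs so that the next write to the range is trapped again. */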
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i, mask, len;
    uint8_t *p;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    len = length >> TARGET_PAGE_BITS;
#ifdef USE_KQEMU
    /* XXX: should not depend on cpu context */
    env = first_cpu;
    if (env->kqemu_enabled) {
        ram_addr_t addr;
        addr = start;
        for(i = 0; i < len; i++) {
            kqemu_set_notdirty(env, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
#endif
    mask = ~dirty_flags;
    p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
    for(i = 0; i < len; i++)
        p[i] &= mask;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = start + (unsigned long)phys_ram_base;
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[0][i], start1, length);
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_reset_dirty_range(&env->tlb_write[1][i], start1, length);
    }

#if !defined(CONFIG_SOFTMMU)
    /* XXX: this is expensive */
    {
        VirtPageDesc *p;
        int j;
        target_ulong addr;

        for(i = 0; i < L1_SIZE; i++) {
            p = l1_virt_map[i];
            if (p) {
                addr = i << (TARGET_PAGE_BITS + L2_BITS);
                for(j = 0; j < L2_SIZE; j++) {
                    if (p->valid_tag == virt_valid_tag &&
                        p->phys_addr >= start && p->phys_addr < end &&
                        (p->prot & PROT_WRITE)) {
                        if (addr < MMAP_AREA_END) {
                            mprotect((void *)addr, TARGET_PAGE_SIZE,
                                     p->prot & ~PROT_WRITE);
                        }
                    }
                    addr += TARGET_PAGE_SIZE;
                    p++;
                }
            }
        }
    }
#endif
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;

    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        ram_addr = (tlb_entry->address & TARGET_PAGE_MASK) +
            tlb_entry->addend - (unsigned long)phys_ram_base;
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->address |= IO_MEM_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[0][i]);
    for(i = 0; i < CPU_TLB_SIZE; i++)
        tlb_update_dirty(&env->tlb_write[1][i]);
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry,
                                  unsigned long start)
{
    unsigned long addr;
    if ((tlb_entry->address & ~TARGET_PAGE_MASK) == IO_MEM_NOTDIRTY) {
        addr = (tlb_entry->address & TARGET_PAGE_MASK) + tlb_entry->addend;
        if (addr == start) {
            tlb_entry->address = (tlb_entry->address & TARGET_PAGE_MASK) | IO_MEM_RAM;
        }
    }
}

/* update the TLB corresponding to virtual page vaddr and phys addr
   addr so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
    int i;

    addr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    tlb_set_dirty1(&env->tlb_write[0][i], addr);
    tlb_set_dirty1(&env->tlb_write[1][i], addr);
}

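/* Note: a softmmu TLB entry stores the guest virtual page in 'address'
   (with I/O flags in the low bits) and an 'addend' chosen so that, for
   RAM pages, host_address = guest_vaddr + addend; for I/O pages the low
   bits of 'address' select the io_mem handler instead. */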
/* add a new TLB entry. At most one entry for a given virtual address
   is permitted. Return 0 if OK or 2 if the page could not be mapped
   (can only happen in non SOFTMMU mode for I/O pages or pages
   conflicting with the host address space). */
int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_phys_addr_t addend;
    int ret;

    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x%08x prot=%x u=%d smmu=%d pd=0x%08lx\n",
           vaddr, paddr, prot, is_user, is_softmmu, pd);
#endif

    ret = 0;
#if !defined(CONFIG_SOFTMMU)
    if (is_softmmu)
#endif
    {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO memory case */
            address = vaddr | pd;
            addend = paddr;
        } else {
            /* standard memory */
            address = vaddr;
            addend = (unsigned long)phys_ram_base + (pd & TARGET_PAGE_MASK);
        }

        index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        addend -= vaddr;
        if (prot & PAGE_READ) {
            env->tlb_read[is_user][index].address = address;
            env->tlb_read[is_user][index].addend = addend;
        } else {
            env->tlb_read[is_user][index].address = -1;
            env->tlb_read[is_user][index].addend = -1;
        }
        if (prot & PAGE_WRITE) {
            if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM) {
                /* ROM: access is ignored (same as unassigned) */
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_ROM;
                env->tlb_write[is_user][index].addend = addend;
            } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                       !cpu_physical_memory_is_dirty(pd)) {
                env->tlb_write[is_user][index].address = vaddr | IO_MEM_NOTDIRTY;
                env->tlb_write[is_user][index].addend = addend;
            } else {
                env->tlb_write[is_user][index].address = address;
                env->tlb_write[is_user][index].addend = addend;
            }
        } else {
            env->tlb_write[is_user][index].address = -1;
            env->tlb_write[is_user][index].addend = -1;
        }
    }
#if !defined(CONFIG_SOFTMMU)
    else {
        if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
            /* IO access: no mapping is done as it will be handled by the
               soft MMU */
            if (!(env->hflags & HF_SOFTMMU_MASK))
                ret = 2;
        } else {
            void *map_addr;

            if (vaddr >= MMAP_AREA_END) {
                ret = 2;
            } else {
                if (prot & PROT_WRITE) {
                    if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
#if defined(TARGET_HAS_SMC) || 1
                        first_tb ||
#endif
                        ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                         !cpu_physical_memory_is_dirty(pd))) {
                        /* ROM: we do as if code was inside */
                        /* if code is present, we only map as read only and save the
                           original mapping */
                        VirtPageDesc *vp;

                        vp = virt_page_find_alloc(vaddr >> TARGET_PAGE_BITS, 1);
                        vp->phys_addr = pd;
                        vp->prot = prot;
                        vp->valid_tag = virt_valid_tag;
                        prot &= ~PAGE_WRITE;
                    }
                }
                map_addr = mmap((void *)vaddr, TARGET_PAGE_SIZE, prot,
                                MAP_SHARED | MAP_FIXED, phys_ram_fd, (pd & TARGET_PAGE_MASK));
                if (map_addr == MAP_FAILED) {
                    cpu_abort(env, "mmap failed when mapping physical address 0x%08x to virtual address 0x%08x\n",
                              paddr, vaddr);
                }
            }
        }
    }
#endif
    return ret;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long addr, unsigned long pc, void *puc)
{
#if !defined(CONFIG_SOFTMMU)
    VirtPageDesc *vp;

#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x\n", addr);
#endif
    addr &= TARGET_PAGE_MASK;

    /* if it is not mapped, no need to worry here */
    if (addr >= MMAP_AREA_END)
        return 0;
    vp = virt_page_find(addr >> TARGET_PAGE_BITS);
    if (!vp)
        return 0;
    /* NOTE: in this case, valid_tag is _not_ tested as it
       validates only the code TLB */
    if (vp->valid_tag != virt_valid_tag)
        return 0;
    if (!(vp->prot & PAGE_WRITE))
        return 0;
#if defined(DEBUG_TLB)
    printf("page_unprotect: addr=0x%08x phys_addr=0x%08x prot=%x\n",
           addr, vp->phys_addr, vp->prot);
#endif
    if (mprotect((void *)addr, TARGET_PAGE_SIZE, vp->prot) < 0)
        cpu_abort(cpu_single_env, "error mprotect addr=0x%lx prot=%d\n",
                  (unsigned long)addr, vp->prot);
    /* set the dirty bit */
    phys_ram_dirty[vp->phys_addr >> TARGET_PAGE_BITS] = 0xff;
    /* flush the code inside */
    tb_invalidate_phys_page(vp->phys_addr, pc, puc);
    return 1;
#else
    return 0;
#endif
}

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

int tlb_set_page(CPUState *env, target_ulong vaddr,
                 target_phys_addr_t paddr, int prot,
                 int is_user, int is_softmmu)
{
    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    unsigned long start, end;
    int i, j, prot, prot1;
    PageDesc *p;

    fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    start = -1;
    end = -1;
    prot = 0;
    for(i = 0; i <= L1_SIZE; i++) {
        if (i < L1_SIZE)
            p = l1_map[i];
        else
            p = NULL;
        for(j = 0; j < L2_SIZE; j++) {
            if (!p)
                prot1 = 0;
            else
                prot1 = p[j].flags;
            if (prot1 != prot) {
                end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
                if (start != -1) {
                    fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
                            start, end, end - start,
                            prot & PAGE_READ ? 'r' : '-',
                            prot & PAGE_WRITE ? 'w' : '-',
                            prot & PAGE_EXEC ? 'x' : '-');
                }
                if (prot1 != 0)
                    start = end;
                else
                    start = -1;
                prot = prot1;
            }
            if (!p)
                break;
        }
    }
}

int page_get_flags(unsigned long address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* modify the flags of a page and invalidate the code if
   necessary. The flag PAGE_WRITE_ORG is positioned automatically
   depending on PAGE_WRITE */
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
    PageDesc *p;
    unsigned long addr;

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    if (flags & PAGE_WRITE)
        flags |= PAGE_WRITE_ORG;
    spin_lock(&tb_lock);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        p = page_find_alloc(addr >> TARGET_PAGE_BITS);
        /* if the write protection is set, then we invalidate the code
           inside */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
    spin_unlock(&tb_lock);
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(unsigned long address, unsigned long pc, void *puc)
{
    unsigned int page_index, prot, pindex;
    PageDesc *p, *p1;
    unsigned long host_start, host_end, addr;

    host_start = address & qemu_host_page_mask;
    page_index = host_start >> TARGET_PAGE_BITS;
    p1 = page_find(page_index);
    if (!p1)
        return 0;
    host_end = host_start + qemu_host_page_size;
    p = p1;
    prot = 0;
    for(addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
        prot |= p->flags;
        p++;
    }
    /* if the page was really writable, then we change its
       protection back to writable */
    if (prot & PAGE_WRITE_ORG) {
        pindex = (address - host_start) >> TARGET_PAGE_BITS;
        if (!(p1[pindex].flags & PAGE_WRITE)) {
            mprotect((void *)host_start, qemu_host_page_size,
                     (prot & PAGE_BITS) | PAGE_WRITE);
            p1[pindex].flags |= PAGE_WRITE;
            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(address, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(address);
#endif
            return 1;
        }
    }
    return 0;
}

/* call this function when system calls directly modify a memory area */
void page_unprotect_range(uint8_t *data, unsigned long data_size)
{
    unsigned long start, end, addr;

    start = (unsigned long)data;
    end = start + data_size;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);
    for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
        page_unprotect(addr, 0, NULL);
    }
}

static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

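/* Note: following the PhysPageDesc comment above, a phys_offset whose low
   bits are zero (IO_MEM_RAM) describes ordinary RAM at that offset, while
   a value of (io_index << IO_MEM_SHIFT) routes accesses to a handler
   registered with cpu_register_io_memory() below. */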
/* register physical memory. 'size' must be a multiple of the target
   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page */
void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;

    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + size;
    for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
        p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
        p->phys_offset = phys_offset;
        if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM)
            phys_offset += TARGET_PAGE_SIZE;
    }
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
}

static CPUReadMemoryFunc *unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readb,
    unassigned_mem_readb,
};

static CPUWriteMemoryFunc *unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writeb,
    unassigned_mem_writeb,
};

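/* The notdirty handlers below are installed (via IO_MEM_NOTDIRTY) for RAM
   pages whose dirty flags are clear, typically because they hold
   translated code: each write first invalidates the TBs on the page, then
   performs the store and sets the dirty flags; once the page is fully
   dirty (0xff) it is switched back to the fast RAM write path. */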
3a7d929e 1777static void notdirty_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1778{
3a7d929e
FB
1779 unsigned long ram_addr;
1780 int dirty_flags;
1781 ram_addr = addr - (unsigned long)phys_ram_base;
1782 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1783 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1784#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1785 tb_invalidate_phys_page_fast(ram_addr, 1);
1786 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1787#endif
3a7d929e 1788 }
c27004ec 1789 stb_p((uint8_t *)(long)addr, val);
f23db169
FB
1790 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1791 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1792 /* we remove the notdirty callback only if the code has been
1793 flushed */
1794 if (dirty_flags == 0xff)
6a00d601 1795 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1796}
1797
3a7d929e 1798static void notdirty_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1799{
3a7d929e
FB
1800 unsigned long ram_addr;
1801 int dirty_flags;
1802 ram_addr = addr - (unsigned long)phys_ram_base;
1803 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1804 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1805#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1806 tb_invalidate_phys_page_fast(ram_addr, 2);
1807 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1808#endif
3a7d929e 1809 }
c27004ec 1810 stw_p((uint8_t *)(long)addr, val);
f23db169
FB
1811 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1812 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1813 /* we remove the notdirty callback only if the code has been
1814 flushed */
1815 if (dirty_flags == 0xff)
6a00d601 1816 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1817}
1818
3a7d929e 1819static void notdirty_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
9fa3e853 1820{
3a7d929e
FB
1821 unsigned long ram_addr;
1822 int dirty_flags;
1823 ram_addr = addr - (unsigned long)phys_ram_base;
1824 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
1825 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
9fa3e853 1826#if !defined(CONFIG_USER_ONLY)
3a7d929e
FB
1827 tb_invalidate_phys_page_fast(ram_addr, 4);
1828 dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
9fa3e853 1829#endif
3a7d929e 1830 }
c27004ec 1831 stl_p((uint8_t *)(long)addr, val);
f23db169
FB
1832 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
1833 phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
1834 /* we remove the notdirty callback only if the code has been
1835 flushed */
1836 if (dirty_flags == 0xff)
6a00d601 1837 tlb_set_dirty(cpu_single_env, addr, cpu_single_env->mem_write_vaddr);
9fa3e853
FB
1838}
1839
static CPUReadMemoryFunc *error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc *notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

static void io_mem_init(void)
{
    cpu_register_io_memory(IO_MEM_ROM >> IO_MEM_SHIFT, error_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_UNASSIGNED >> IO_MEM_SHIFT, unassigned_mem_read, unassigned_mem_write, NULL);
    cpu_register_io_memory(IO_MEM_NOTDIRTY >> IO_MEM_SHIFT, error_mem_read, notdirty_mem_write, NULL);
    io_mem_nb = 5;

    /* allocate the dirty bits array (one byte per target page) */
    phys_ram_dirty = qemu_vmalloc(phys_ram_size >> TARGET_PAGE_BITS);
    memset(phys_ram_dirty, 0xff, phys_ram_size >> TARGET_PAGE_BITS);
}
/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). All functions must be supplied. If io_index is non-zero, the
   corresponding io zone is modified. If it is zero, a new io zone is
   allocated. The return value can be used with
   cpu_register_physical_memory(). -1 is returned on error. */
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque)
{
    int i;

    if (io_index <= 0) {
        if (io_mem_nb >= IO_MEM_NB_ENTRIES)
            return -1;
        io_index = io_mem_nb++;
    } else {
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for(i = 0; i < 3; i++) {
        io_mem_read[io_index][i] = mem_read[i];
        io_mem_write[io_index][i] = mem_write[i];
    }
    io_mem_opaque[io_index] = opaque;
    return io_index << IO_MEM_SHIFT;
}
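
/* Illustrative sketch (hypothetical device, not in this file): a
   device model registers byte/word/dword handler triplets, then maps
   the returned index at a guest physical address. The my_dev_*
   handlers, the address 0xfe000000 and the opaque state are assumed. */
#if 0
static CPUReadMemoryFunc *my_dev_read[3] = {
    my_dev_readb, my_dev_readw, my_dev_readl,
};
static CPUWriteMemoryFunc *my_dev_write[3] = {
    my_dev_writeb, my_dev_writew, my_dev_writel,
};

static void example_init_device(void *dev_state)
{
    int io;
    io = cpu_register_io_memory(0, my_dev_read, my_dev_write, dev_state);
    if (io < 0)
        return; /* no free entry in the io_mem tables */
    cpu_register_physical_memory(0xfe000000, 0x1000, io);
}
#endif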

CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index)
{
    return io_mem_write[io_index >> IO_MEM_SHIFT];
}

CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index)
{
    return io_mem_read[io_index >> IO_MEM_SHIFT];
}

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, flags;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return;
            /* copy at most up to the end of the current page */
            memcpy((uint8_t *)addr, buf, l);
        } else {
            if (!(flags & PAGE_READ))
                return;
            memcpy(buf, (uint8_t *)addr, l);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = phys_ram_base + addr1;
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                        (0xff & ~CODE_DIRTY_FLAG);
                }
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (l >= 4 && ((addr & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
                    (addr & ~TARGET_PAGE_MASK);
                memcpy(buf, ptr, l);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
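
/* Illustrative sketch (hypothetical device model): DMA-style transfers
   go through the cpu_physical_memory_read/write wrappers around
   cpu_physical_memory_rw, which dispatch each page either to RAM or to
   the registered I/O handlers. The copy loop below is assumed. */
#if 0
static void example_dma_copy(target_phys_addr_t src,
                             target_phys_addr_t dst, int len)
{
    uint8_t tmp[64];
    int l;
    while (len > 0) {
        l = len < (int)sizeof(tmp) ? len : (int)sizeof(tmp);
        cpu_physical_memory_read(src, tmp, l);
        cpu_physical_memory_write(dst, tmp, l);
        src += l;
        dst += l;
        len -= l;
    }
}
#endif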

/* warning: addr must be aligned */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
    } else {
        /* RAM case */
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        val = ldl_p(ptr);
    }
    return val;
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* XXX: optimize */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    uint16_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 2);
    return tswap16(val);
}

/* XXX: optimize */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    uint64_t val;
    cpu_physical_memory_read(addr, (uint8_t *)&val, 8);
    return tswap64(val);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        ptr = phys_ram_base + (pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stl_p(ptr, val);
    }
}
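
/* Illustrative sketch (hypothetical MMU helper): when a target's page
   table walker sets accessed/dirty bits in a guest PTE, it can use the
   _notdirty variant so the PTE update itself neither marks the page
   dirty nor invalidates translated code. The bit value is assumed. */
#if 0
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);
    stl_phys_notdirty(pte_addr, pte | 0x20 /* hypothetical accessed bit */);
}
#endif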

/* warning: addr must be aligned */
void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = phys_ram_base + addr1;
        stl_p(ptr, val);
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
                (0xff & ~CODE_DIRTY_FLAG);
        }
    }
}

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* XXX: optimize */
void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    uint16_t v = tswap16(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&v, 2);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}

#endif

/* virtual memory access for debug */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_ulong page, phys_addr;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        cpu_physical_memory_rw(phys_addr + (addr & ~TARGET_PAGE_MASK),
                               buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
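
/* Illustrative sketch (hypothetical debugger-stub helper): reading a
   32-bit value at a guest virtual address through the debug accessor,
   which translates each page with cpu_get_phys_page_debug. */
#if 0
static int example_read_guest_u32(CPUState *env, target_ulong vaddr,
                                  uint32_t *out)
{
    uint8_t buf[4];
    if (cpu_memory_rw_debug(env, vaddr, buf, 4, 0) < 0)
        return -1;
    *out = ldl_p(buf);  /* interpret in target byte order */
    return 0;
}
#endif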

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "TB count %d\n", nb_tbs);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
}
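
/* Illustrative sketch (assumed caller): a monitor command such as
   "info jit" would pass its output stream and an fprintf-compatible
   callback; plain fprintf matches the expected signature. */
#if 0
static void example_show_jit_stats(void)
{
    dump_exec_info(stderr, fprintf);
}
#endif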

#if !defined(CONFIG_USER_ONLY)

/* instantiate the softmmu access templates for code fetches: the
   _cmmu suffix and SOFTMMU_CODE_ACCESS generate the helpers used when
   the translator reads guest instructions */
#define MMUSUFFIX _cmmu
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif