/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#if defined(__arm__) || defined(__sparc__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif
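
/* For illustration: an x86 host is little endian, so emulating a big endian
   target such as SPARC or PPC defines BSWAP_NEEDED and the tswap* helpers
   below byte swap target quantities; when host and target endianness match
   they reduce to the identity. */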

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#endif

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian ! */
typedef union {
    double d;
#if defined(WORDS_BIGENDIAN) || (defined(__arm__) && !defined(__VFP_FP__))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

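/* For illustration: code such as stfq_p() below fills the d field with a
   double and then stores l.lower and l.upper as two 32 bit words; the union
   hides the host (and arm FPA) specific ordering of the two halves. */
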
/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load:  ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
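
/* Some concrete instances of the naming scheme, for illustration:
 *   ldub_raw(p)       load unsigned 8 bit value from host memory
 *   ldsw_raw(p)       load signed 16 bit value from host memory
 *   stl_kernel(p, v)  store 32 bit value using the kernel mode soft MMU
 *   ldfq_raw(p)       load 64 bit float (double) from host memory
 */
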
static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting: bad */
#if !defined(TARGET_WORDS_BIGENDIAN) && (defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))

/* conservative code for little endian unaligned accesses */
static inline int lduw_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_p(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_p(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_p(p);
    v2 = ldl_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_p(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_p(p, (uint32_t)v);
    stl_p(p + 4, v >> 32);
}

/* float access */

static inline float ldfl_p(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl_p(ptr);
    return u.f;
}

static inline void stfl_p(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl_p(ptr, u.i);
}

static inline double ldfq_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_p(ptr);
    u.l.upper = ldl_p(ptr + 4);
    return u.d;
}

static inline void stfq_p(void *ptr, double v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_p(ptr, u.l.lower);
    stl_p(ptr + 4, u.l.upper);
}

#elif defined(TARGET_WORDS_BIGENDIAN) && (!defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED))

static inline int lduw_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_p(void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_p(void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    uint8_t *b = (uint8_t *) ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_p(void *ptr)
{
    uint32_t a, b;
    a = ldl_p(ptr);
    b = ldl_p(ptr + 4);
    return ((uint64_t)a << 32) | b;
}

static inline void stw_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_p(void *ptr, uint64_t v)
{
    stl_p(ptr, v >> 32);
    stl_p(ptr + 4, v);
}

/* float access */

static inline float ldfl_p(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl_p(ptr);
    return u.f;
}

static inline void stfl_p(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl_p(ptr, u.i);
}

static inline double ldfq_p(void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_p(ptr);
    u.l.lower = ldl_p(ptr + 4);
    return u.d;
}

static inline void stfq_p(void *ptr, double v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_p(ptr, u.l.upper);
    stl_p(ptr + 4, u.l.lower);
}

#else

static inline int lduw_p(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_p(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_p(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_p(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float ldfl_p(void *ptr)
{
    return *(float *)ptr;
}

static inline double ldfq_p(void *ptr)
{
    return *(double *)ptr;
}

static inline void stfl_p(void *ptr, float v)
{
    *(float *)ptr = v;
}

static inline void stfq_p(void *ptr, double v)
{
    *(double *)ptr = v;
}
#endif

/* MMU memory access macros */

/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define ldub_raw(p) ldub_p((uint8_t *)(long)(p))
#define ldsb_raw(p) ldsb_p((uint8_t *)(long)(p))
#define lduw_raw(p) lduw_p((uint8_t *)(long)(p))
#define ldsw_raw(p) ldsw_p((uint8_t *)(long)(p))
#define ldl_raw(p) ldl_p((uint8_t *)(long)(p))
#define ldq_raw(p) ldq_p((uint8_t *)(long)(p))
#define ldfl_raw(p) ldfl_p((uint8_t *)(long)(p))
#define ldfq_raw(p) ldfq_p((uint8_t *)(long)(p))
#define stb_raw(p, v) stb_p((uint8_t *)(long)(p), v)
#define stw_raw(p, v) stw_p((uint8_t *)(long)(p), v)
#define stl_raw(p, v) stl_p((uint8_t *)(long)(p), v)
#define stq_raw(p, v) stq_p((uint8_t *)(long)(p), v)
#define stfl_raw(p, v) stfl_p((uint8_t *)(long)(p), v)
#define stfq_raw(p, v) stfq_p((uint8_t *)(long)(p), v)

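/* For illustration: on a 64 bit host running a 32 bit target, p is a 32 bit
   target_ulong; the inner (long) cast widens it to the host pointer size
   before the (uint8_t *) cast, avoiding an int-to-pointer size mismatch. */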

#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

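/* Worked example, assuming 4 KiB target pages (TARGET_PAGE_BITS == 12):
   TARGET_PAGE_MASK is then ~0xfff and TARGET_PAGE_ALIGN(0x1234) rounds up
   to the next page boundary, 0x2000. */
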
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010

void page_dump(FILE *f);
int page_get_flags(unsigned long address);
void page_set_flags(unsigned long start, unsigned long end, int flags);
void page_unprotect_range(uint8_t *data, unsigned long data_size);

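/* Usage sketch (start/end values are hypothetical): user mode emulation
   code could record the protection of a freshly mapped guest region with
   page_set_flags(start, end, PAGE_READ | PAGE_WRITE | PAGE_VALID) and later
   query it with page_get_flags(addr). */
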
#define SINGLE_CPU_DEFINES
#ifdef SINGLE_CPU_DEFINES

#if defined(TARGET_I386)

#define CPUState CPUX86State
#define cpu_init cpu_x86_init
#define cpu_exec cpu_x86_exec
#define cpu_gen_code cpu_x86_gen_code
#define cpu_signal_handler cpu_x86_signal_handler

#elif defined(TARGET_ARM)

#define CPUState CPUARMState
#define cpu_init cpu_arm_init
#define cpu_exec cpu_arm_exec
#define cpu_gen_code cpu_arm_gen_code
#define cpu_signal_handler cpu_arm_signal_handler

#elif defined(TARGET_SPARC)

#define CPUState CPUSPARCState
#define cpu_init cpu_sparc_init
#define cpu_exec cpu_sparc_exec
#define cpu_gen_code cpu_sparc_gen_code
#define cpu_signal_handler cpu_sparc_signal_handler

#elif defined(TARGET_PPC)

#define CPUState CPUPPCState
#define cpu_init cpu_ppc_init
#define cpu_exec cpu_ppc_exec
#define cpu_gen_code cpu_ppc_gen_code
#define cpu_signal_handler cpu_ppc_signal_handler

#else

#error unsupported target CPU

#endif

#endif /* SINGLE_CPU_DEFINES */

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);

void cpu_abort(CPUState *env, const char *fmt, ...);
extern CPUState *cpu_single_env;
extern int code_copy_enabled;

#define CPU_INTERRUPT_EXIT   0x01 /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02 /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08 /* internal timer exception pending */
void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

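/* Usage sketch: a device model that wants the CPU to take a pending
   hardware interrupt might call cpu_interrupt(env, CPU_INTERRUPT_HARD);
   once the interrupt has been serviced, cpu_reset_interrupt(env,
   CPU_INTERRUPT_HARD) clears the flag again. */
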
int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

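/* Usage sketch: interrupt and execution logging can be enabled directly
   with cpu_set_log(CPU_LOG_INT | CPU_LOG_EXEC), or a user supplied item
   list (as given to the -d command line option) can be translated first,
   e.g. cpu_set_log(cpu_str_to_log_mask(str)). */
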
/* IO ports API */

/* NOTE: as these functions may even be used when there is an isa
   bridge on non x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* memory API */

extern int phys_ram_size;
extern int phys_ram_fd;
extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;

/* physical memory access */
#define IO_MEM_NB_ENTRIES 256
#define TLB_INVALID_MASK  (1 << 3)
#define IO_MEM_SHIFT      4

#define IO_MEM_RAM        (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM        (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_CODE       (3 << IO_MEM_SHIFT) /* used internally, never use directly */
#define IO_MEM_NOTDIRTY   (4 << IO_MEM_SHIFT) /* used internally, never use directly */

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                  unsigned long size,
                                  unsigned long phys_offset);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);

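/* Usage sketch (my_io_read/my_io_write and the addresses are hypothetical):
   a device model typically registers read and write handlers and then maps
   them into the physical address space:

       static CPUReadMemoryFunc *my_io_read[3];   // byte, word and long handlers
       static CPUWriteMemoryFunc *my_io_write[3];

       int io = cpu_register_io_memory(0, my_io_read, my_io_write, opaque);
       cpu_register_physical_memory(0xf0000000, 0x1000, io);
*/
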
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
uint32_t ldl_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);

int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(target_ulong addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS];
}

static inline void cpu_physical_memory_set_dirty(target_ulong addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 1;
}

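/* For illustration: phys_ram_dirty holds one byte per target page, indexed
   by physical address >> TARGET_PAGE_BITS. A consumer can test a page with
   cpu_physical_memory_is_dirty(addr) and clear a range it has already
   processed with cpu_physical_memory_reset_dirty(start, end). */
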
void cpu_physical_memory_reset_dirty(target_ulong start, target_ulong end);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

#endif /* CPU_ALL_H */