Adds interrupt support to the sh specific timer code (Magnus Damm).
[qemu.git] / cpu-all.h
CommitLineData
5a9fdfec
FB
1/*
2 * defines common to all virtual CPUs
5fafdf24 3 *
5a9fdfec
FB
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#ifndef CPU_ALL_H
21#define CPU_ALL_H
22
c4b89d18 23#if defined(__arm__) || defined(__sparc__) || defined(__mips__)
0ac4bd56
FB
24#define WORDS_ALIGNED
25#endif
26
5fafdf24
TS
27/* some important defines:
28 *
0ac4bd56
FB
29 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
30 * memory accesses.
5fafdf24 31 *
0ac4bd56
FB
32 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
33 * otherwise little endian.
5fafdf24 34 *
0ac4bd56 35 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
5fafdf24 36 *
0ac4bd56
FB
37 * TARGET_WORDS_BIGENDIAN : same for target cpu
38 */
39
f193c797
FB
40#include "bswap.h"
41
/* Target loads/stores must byte-swap exactly when host and target
   endianness differ; otherwise the tswap* helpers are identities. */
#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

/* Value-returning swaps. */
static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

/* In-place swaps. */
static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

/* Same endianness: no swapping needed. */
static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif
/* target_long-sized variants, selected by TARGET_LONG_SIZE. */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
832ed0fa
FB
119/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
120 endian ! */
0ac4bd56 121typedef union {
53cd6637 122 float64 d;
9d60cac0
FB
123#if defined(WORDS_BIGENDIAN) \
124 || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
0ac4bd56 125 struct {
0ac4bd56 126 uint32_t upper;
832ed0fa 127 uint32_t lower;
0ac4bd56
FB
128 } l;
129#else
130 struct {
0ac4bd56 131 uint32_t lower;
832ed0fa 132 uint32_t upper;
0ac4bd56
FB
133 } l;
134#endif
135 uint64_t ll;
136} CPU_DoubleU;
137
#ifdef TARGET_SPARC
/* View a 128-bit float as four 32-bit or two 64-bit chunks
   (sparc only). */
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif
/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */

/* 8-bit accesses have no endianness issue. */
static inline int ldub_p(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
218/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
219 kernel handles unaligned load/stores may give better results, but
220 it is a system wide setting : bad */
2df3b95d 221#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
5a9fdfec
FB
222
223/* conservative code for little endian unaligned accesses */
2df3b95d 224static inline int lduw_le_p(void *ptr)
5a9fdfec
FB
225{
226#ifdef __powerpc__
227 int val;
228 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
229 return val;
230#else
231 uint8_t *p = ptr;
232 return p[0] | (p[1] << 8);
233#endif
234}
235
2df3b95d 236static inline int ldsw_le_p(void *ptr)
5a9fdfec
FB
237{
238#ifdef __powerpc__
239 int val;
240 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
241 return (int16_t)val;
242#else
243 uint8_t *p = ptr;
244 return (int16_t)(p[0] | (p[1] << 8));
245#endif
246}
247
2df3b95d 248static inline int ldl_le_p(void *ptr)
5a9fdfec
FB
249{
250#ifdef __powerpc__
251 int val;
252 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
253 return val;
254#else
255 uint8_t *p = ptr;
256 return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
257#endif
258}
259
2df3b95d 260static inline uint64_t ldq_le_p(void *ptr)
5a9fdfec
FB
261{
262 uint8_t *p = ptr;
263 uint32_t v1, v2;
f0aca822
FB
264 v1 = ldl_le_p(p);
265 v2 = ldl_le_p(p + 4);
5a9fdfec
FB
266 return v1 | ((uint64_t)v2 << 32);
267}
268
2df3b95d 269static inline void stw_le_p(void *ptr, int v)
5a9fdfec
FB
270{
271#ifdef __powerpc__
272 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
273#else
274 uint8_t *p = ptr;
275 p[0] = v;
276 p[1] = v >> 8;
277#endif
278}
279
2df3b95d 280static inline void stl_le_p(void *ptr, int v)
5a9fdfec
FB
281{
282#ifdef __powerpc__
283 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
284#else
285 uint8_t *p = ptr;
286 p[0] = v;
287 p[1] = v >> 8;
288 p[2] = v >> 16;
289 p[3] = v >> 24;
290#endif
291}
292
2df3b95d 293static inline void stq_le_p(void *ptr, uint64_t v)
5a9fdfec
FB
294{
295 uint8_t *p = ptr;
f0aca822
FB
296 stl_le_p(p, (uint32_t)v);
297 stl_le_p(p + 4, v >> 32);
5a9fdfec
FB
298}
299
300/* float access */
301
2df3b95d 302static inline float32 ldfl_le_p(void *ptr)
5a9fdfec
FB
303{
304 union {
53cd6637 305 float32 f;
5a9fdfec
FB
306 uint32_t i;
307 } u;
2df3b95d 308 u.i = ldl_le_p(ptr);
5a9fdfec
FB
309 return u.f;
310}
311
2df3b95d 312static inline void stfl_le_p(void *ptr, float32 v)
5a9fdfec
FB
313{
314 union {
53cd6637 315 float32 f;
5a9fdfec
FB
316 uint32_t i;
317 } u;
318 u.f = v;
2df3b95d 319 stl_le_p(ptr, u.i);
5a9fdfec
FB
320}
321
2df3b95d 322static inline float64 ldfq_le_p(void *ptr)
5a9fdfec 323{
0ac4bd56 324 CPU_DoubleU u;
2df3b95d
FB
325 u.l.lower = ldl_le_p(ptr);
326 u.l.upper = ldl_le_p(ptr + 4);
5a9fdfec
FB
327 return u.d;
328}
329
2df3b95d 330static inline void stfq_le_p(void *ptr, float64 v)
5a9fdfec 331{
0ac4bd56 332 CPU_DoubleU u;
5a9fdfec 333 u.d = v;
2df3b95d
FB
334 stl_le_p(ptr, u.l.lower);
335 stl_le_p(ptr + 4, u.l.upper);
5a9fdfec
FB
336}
337
2df3b95d
FB
338#else
339
340static inline int lduw_le_p(void *ptr)
341{
342 return *(uint16_t *)ptr;
343}
344
345static inline int ldsw_le_p(void *ptr)
346{
347 return *(int16_t *)ptr;
348}
93ac68bc 349
2df3b95d
FB
350static inline int ldl_le_p(void *ptr)
351{
352 return *(uint32_t *)ptr;
353}
354
355static inline uint64_t ldq_le_p(void *ptr)
356{
357 return *(uint64_t *)ptr;
358}
359
360static inline void stw_le_p(void *ptr, int v)
361{
362 *(uint16_t *)ptr = v;
363}
364
365static inline void stl_le_p(void *ptr, int v)
366{
367 *(uint32_t *)ptr = v;
368}
369
370static inline void stq_le_p(void *ptr, uint64_t v)
371{
372 *(uint64_t *)ptr = v;
373}
374
375/* float access */
376
377static inline float32 ldfl_le_p(void *ptr)
378{
379 return *(float32 *)ptr;
380}
381
382static inline float64 ldfq_le_p(void *ptr)
383{
384 return *(float64 *)ptr;
385}
386
387static inline void stfl_le_p(void *ptr, float32 v)
388{
389 *(float32 *)ptr = v;
390}
391
392static inline void stfq_le_p(void *ptr, float64 v)
393{
394 *(float64 *)ptr = v;
395}
396#endif
397
398#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)
399
400static inline int lduw_be_p(void *ptr)
93ac68bc 401{
83d73968
FB
402#if defined(__i386__)
403 int val;
404 asm volatile ("movzwl %1, %0\n"
405 "xchgb %b0, %h0\n"
406 : "=q" (val)
407 : "m" (*(uint16_t *)ptr));
408 return val;
409#else
93ac68bc 410 uint8_t *b = (uint8_t *) ptr;
83d73968
FB
411 return ((b[0] << 8) | b[1]);
412#endif
93ac68bc
FB
413}
414
2df3b95d 415static inline int ldsw_be_p(void *ptr)
93ac68bc 416{
83d73968
FB
417#if defined(__i386__)
418 int val;
419 asm volatile ("movzwl %1, %0\n"
420 "xchgb %b0, %h0\n"
421 : "=q" (val)
422 : "m" (*(uint16_t *)ptr));
423 return (int16_t)val;
424#else
425 uint8_t *b = (uint8_t *) ptr;
426 return (int16_t)((b[0] << 8) | b[1]);
427#endif
93ac68bc
FB
428}
429
2df3b95d 430static inline int ldl_be_p(void *ptr)
93ac68bc 431{
4f2ac237 432#if defined(__i386__) || defined(__x86_64__)
83d73968
FB
433 int val;
434 asm volatile ("movl %1, %0\n"
435 "bswap %0\n"
436 : "=r" (val)
437 : "m" (*(uint32_t *)ptr));
438 return val;
439#else
93ac68bc 440 uint8_t *b = (uint8_t *) ptr;
83d73968
FB
441 return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
442#endif
93ac68bc
FB
443}
444
2df3b95d 445static inline uint64_t ldq_be_p(void *ptr)
93ac68bc
FB
446{
447 uint32_t a,b;
2df3b95d
FB
448 a = ldl_be_p(ptr);
449 b = ldl_be_p(ptr+4);
93ac68bc
FB
450 return (((uint64_t)a<<32)|b);
451}
452
2df3b95d 453static inline void stw_be_p(void *ptr, int v)
93ac68bc 454{
83d73968
FB
455#if defined(__i386__)
456 asm volatile ("xchgb %b0, %h0\n"
457 "movw %w0, %1\n"
458 : "=q" (v)
459 : "m" (*(uint16_t *)ptr), "0" (v));
460#else
93ac68bc
FB
461 uint8_t *d = (uint8_t *) ptr;
462 d[0] = v >> 8;
463 d[1] = v;
83d73968 464#endif
93ac68bc
FB
465}
466
2df3b95d 467static inline void stl_be_p(void *ptr, int v)
93ac68bc 468{
4f2ac237 469#if defined(__i386__) || defined(__x86_64__)
83d73968
FB
470 asm volatile ("bswap %0\n"
471 "movl %0, %1\n"
472 : "=r" (v)
473 : "m" (*(uint32_t *)ptr), "0" (v));
474#else
93ac68bc
FB
475 uint8_t *d = (uint8_t *) ptr;
476 d[0] = v >> 24;
477 d[1] = v >> 16;
478 d[2] = v >> 8;
479 d[3] = v;
83d73968 480#endif
93ac68bc
FB
481}
482
2df3b95d 483static inline void stq_be_p(void *ptr, uint64_t v)
93ac68bc 484{
2df3b95d
FB
485 stl_be_p(ptr, v >> 32);
486 stl_be_p(ptr + 4, v);
0ac4bd56
FB
487}
488
489/* float access */
490
2df3b95d 491static inline float32 ldfl_be_p(void *ptr)
0ac4bd56
FB
492{
493 union {
53cd6637 494 float32 f;
0ac4bd56
FB
495 uint32_t i;
496 } u;
2df3b95d 497 u.i = ldl_be_p(ptr);
0ac4bd56
FB
498 return u.f;
499}
500
2df3b95d 501static inline void stfl_be_p(void *ptr, float32 v)
0ac4bd56
FB
502{
503 union {
53cd6637 504 float32 f;
0ac4bd56
FB
505 uint32_t i;
506 } u;
507 u.f = v;
2df3b95d 508 stl_be_p(ptr, u.i);
0ac4bd56
FB
509}
510
2df3b95d 511static inline float64 ldfq_be_p(void *ptr)
0ac4bd56
FB
512{
513 CPU_DoubleU u;
2df3b95d
FB
514 u.l.upper = ldl_be_p(ptr);
515 u.l.lower = ldl_be_p(ptr + 4);
0ac4bd56
FB
516 return u.d;
517}
518
2df3b95d 519static inline void stfq_be_p(void *ptr, float64 v)
0ac4bd56
FB
520{
521 CPU_DoubleU u;
522 u.d = v;
2df3b95d
FB
523 stl_be_p(ptr, u.l.upper);
524 stl_be_p(ptr + 4, u.l.lower);
93ac68bc
FB
525}
526
5a9fdfec
FB
527#else
528
2df3b95d 529static inline int lduw_be_p(void *ptr)
5a9fdfec
FB
530{
531 return *(uint16_t *)ptr;
532}
533
2df3b95d 534static inline int ldsw_be_p(void *ptr)
5a9fdfec
FB
535{
536 return *(int16_t *)ptr;
537}
538
2df3b95d 539static inline int ldl_be_p(void *ptr)
5a9fdfec
FB
540{
541 return *(uint32_t *)ptr;
542}
543
2df3b95d 544static inline uint64_t ldq_be_p(void *ptr)
5a9fdfec
FB
545{
546 return *(uint64_t *)ptr;
547}
548
2df3b95d 549static inline void stw_be_p(void *ptr, int v)
5a9fdfec
FB
550{
551 *(uint16_t *)ptr = v;
552}
553
2df3b95d 554static inline void stl_be_p(void *ptr, int v)
5a9fdfec
FB
555{
556 *(uint32_t *)ptr = v;
557}
558
2df3b95d 559static inline void stq_be_p(void *ptr, uint64_t v)
5a9fdfec
FB
560{
561 *(uint64_t *)ptr = v;
562}
563
564/* float access */
565
2df3b95d 566static inline float32 ldfl_be_p(void *ptr)
5a9fdfec 567{
53cd6637 568 return *(float32 *)ptr;
5a9fdfec
FB
569}
570
2df3b95d 571static inline float64 ldfq_be_p(void *ptr)
5a9fdfec 572{
53cd6637 573 return *(float64 *)ptr;
5a9fdfec
FB
574}
575
2df3b95d 576static inline void stfl_be_p(void *ptr, float32 v)
5a9fdfec 577{
53cd6637 578 *(float32 *)ptr = v;
5a9fdfec
FB
579}
580
2df3b95d 581static inline void stfq_be_p(void *ptr, float64 v)
5a9fdfec 582{
53cd6637 583 *(float64 *)ptr = v;
5a9fdfec 584}
2df3b95d
FB
585
586#endif
/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif
/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu. */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ((target_ulong)(x - GUEST_BASE))

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)
/* If user mode, no other memory access functions: everything maps to
   the raw host accessors. */
#if defined(CONFIG_USER_ONLY)

#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)

#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)

#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
/* bug fix: the parameter was named "vt" while the expansion used "v",
   so any use of stfq_kernel failed to compile */
#define stfq_kernel(p, v) stfq_raw(p, v)

#endif /* defined(CONFIG_USER_ONLY) */
695
5a9fdfec
FB
696/* page related stuff */
697
698#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
699#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
700#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
701
53a5960a 702/* ??? These should be the larger of unsigned long and target_ulong. */
83fb7adf
FB
703extern unsigned long qemu_real_host_page_size;
704extern unsigned long qemu_host_page_bits;
705extern unsigned long qemu_host_page_size;
706extern unsigned long qemu_host_page_mask;
5a9fdfec 707
83fb7adf 708#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
5a9fdfec
FB
709
710/* same as PROT_xxx */
711#define PAGE_READ 0x0001
712#define PAGE_WRITE 0x0002
713#define PAGE_EXEC 0x0004
714#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
715#define PAGE_VALID 0x0008
716/* original state of the write flag (used when tracking self-modifying
717 code */
5fafdf24 718#define PAGE_WRITE_ORG 0x0010
5a9fdfec
FB
719
720void page_dump(FILE *f);
53a5960a
PB
721int page_get_flags(target_ulong address);
722void page_set_flags(target_ulong start, target_ulong end, int flags);
3d97b40b 723int page_check_range(target_ulong start, target_ulong len, int flags);
5a9fdfec 724
c5be9f08
TS
725CPUState *cpu_copy(CPUState *env);
726
5fafdf24 727void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
728 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
729 int flags);
76a66253
JM
730void cpu_dump_statistics (CPUState *env, FILE *f,
731 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
732 int flags);
7fe48483 733
a90b7318 734void cpu_abort(CPUState *env, const char *fmt, ...)
c3d2689d
AZ
735 __attribute__ ((__format__ (__printf__, 2, 3)))
736 __attribute__ ((__noreturn__));
f0aca822 737extern CPUState *first_cpu;
e2f22898 738extern CPUState *cpu_single_env;
9acbed06 739extern int code_copy_enabled;
5a9fdfec 740
9acbed06
FB
741#define CPU_INTERRUPT_EXIT 0x01 /* wants exit from main loop */
742#define CPU_INTERRUPT_HARD 0x02 /* hardware interrupt pending */
743#define CPU_INTERRUPT_EXITTB 0x04 /* exit the current TB (use for x86 a20 case) */
ef792f9d 744#define CPU_INTERRUPT_TIMER 0x08 /* internal timer exception pending */
98699967 745#define CPU_INTERRUPT_FIQ 0x10 /* Fast interrupt pending. */
ba3c64fb 746#define CPU_INTERRUPT_HALT 0x20 /* CPU halt wanted */
3b21e03e 747#define CPU_INTERRUPT_SMI 0x40 /* (x86 only) SMI interrupt pending */
6658ffb8 748#define CPU_INTERRUPT_DEBUG 0x80 /* Debug event occured. */
0573fbfc 749#define CPU_INTERRUPT_VIRQ 0x100 /* virtual interrupt pending. */
98699967 750
4690764b 751void cpu_interrupt(CPUState *s, int mask);
b54ad049 752void cpu_reset_interrupt(CPUState *env, int mask);
68a79315 753
6658ffb8
PB
754int cpu_watchpoint_insert(CPUState *env, target_ulong addr);
755int cpu_watchpoint_remove(CPUState *env, target_ulong addr);
2e12669a
FB
756int cpu_breakpoint_insert(CPUState *env, target_ulong pc);
757int cpu_breakpoint_remove(CPUState *env, target_ulong pc);
c33a346e 758void cpu_single_step(CPUState *env, int enabled);
d95dc32d 759void cpu_reset(CPUState *s);
4c3a88a2 760
13eb76e0
FB
761/* Return the physical page corresponding to a virtual one. Use it
762 only for debugging because no protection checks are done. Return -1
763 if no page found. */
9b3c35e0 764target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
13eb76e0 765
5fafdf24 766#define CPU_LOG_TB_OUT_ASM (1 << 0)
9fddaa0c 767#define CPU_LOG_TB_IN_ASM (1 << 1)
f193c797
FB
768#define CPU_LOG_TB_OP (1 << 2)
769#define CPU_LOG_TB_OP_OPT (1 << 3)
770#define CPU_LOG_INT (1 << 4)
771#define CPU_LOG_EXEC (1 << 5)
772#define CPU_LOG_PCALL (1 << 6)
fd872598 773#define CPU_LOG_IOPORT (1 << 7)
9fddaa0c 774#define CPU_LOG_TB_CPU (1 << 8)
f193c797
FB
775
776/* define log items */
777typedef struct CPULogItem {
778 int mask;
779 const char *name;
780 const char *help;
781} CPULogItem;
782
783extern CPULogItem cpu_log_items[];
784
34865134
FB
785void cpu_set_log(int log_flags);
786void cpu_set_log_filename(const char *filename);
f193c797 787int cpu_str_to_log_mask(const char *str);
34865134 788
09683d35
FB
789/* IO ports API */
790
791/* NOTE: as these functions may be even used when there is an isa
792 brige on non x86 targets, we always defined them */
793#ifndef NO_CPU_IO_DEFS
794void cpu_outb(CPUState *env, int addr, int val);
795void cpu_outw(CPUState *env, int addr, int val);
796void cpu_outl(CPUState *env, int addr, int val);
797int cpu_inb(CPUState *env, int addr);
798int cpu_inw(CPUState *env, int addr);
799int cpu_inl(CPUState *env, int addr);
800#endif
801
33417e70
FB
802/* memory API */
803
edf75d59
FB
804extern int phys_ram_size;
805extern int phys_ram_fd;
806extern uint8_t *phys_ram_base;
1ccde1cb 807extern uint8_t *phys_ram_dirty;
edf75d59
FB
808
809/* physical memory access */
edf75d59
FB
810#define TLB_INVALID_MASK (1 << 3)
811#define IO_MEM_SHIFT 4
98699967 812#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))
edf75d59
FB
813
814#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
815#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
816#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
1ccde1cb 817#define IO_MEM_NOTDIRTY (4 << IO_MEM_SHIFT) /* used internally, never use directly */
2a4188a3
FB
818/* acts like a ROM when read and like a device when written. As an
819 exception, the write memory callback gets the ram offset instead of
820 the physical address */
821#define IO_MEM_ROMD (1)
db7b5426 822#define IO_MEM_SUBPAGE (2)
edf75d59 823
7727994d
FB
824typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
825typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
33417e70 826
5fafdf24 827void cpu_register_physical_memory(target_phys_addr_t start_addr,
2e12669a
FB
828 unsigned long size,
829 unsigned long phys_offset);
3b21e03e 830uint32_t cpu_get_physical_page_desc(target_phys_addr_t addr);
e9a1ab19
FB
831ram_addr_t qemu_ram_alloc(unsigned int size);
832void qemu_ram_free(ram_addr_t addr);
33417e70
FB
833int cpu_register_io_memory(int io_index,
834 CPUReadMemoryFunc **mem_read,
7727994d
FB
835 CPUWriteMemoryFunc **mem_write,
836 void *opaque);
8926b517
FB
837CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
838CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
33417e70 839
2e12669a 840void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
13eb76e0 841 int len, int is_write);
5fafdf24 842static inline void cpu_physical_memory_read(target_phys_addr_t addr,
2e12669a 843 uint8_t *buf, int len)
8b1f24b0
FB
844{
845 cpu_physical_memory_rw(addr, buf, len, 0);
846}
5fafdf24 847static inline void cpu_physical_memory_write(target_phys_addr_t addr,
2e12669a 848 const uint8_t *buf, int len)
8b1f24b0
FB
849{
850 cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
851}
aab33094
FB
852uint32_t ldub_phys(target_phys_addr_t addr);
853uint32_t lduw_phys(target_phys_addr_t addr);
8df1cd07 854uint32_t ldl_phys(target_phys_addr_t addr);
aab33094 855uint64_t ldq_phys(target_phys_addr_t addr);
8df1cd07 856void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
bc98a7ef 857void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
aab33094
FB
858void stb_phys(target_phys_addr_t addr, uint32_t val);
859void stw_phys(target_phys_addr_t addr, uint32_t val);
8df1cd07 860void stl_phys(target_phys_addr_t addr, uint32_t val);
aab33094 861void stq_phys(target_phys_addr_t addr, uint64_t val);
8b1f24b0 862
5fafdf24 863void cpu_physical_memory_write_rom(target_phys_addr_t addr,
d0ecd2aa 864 const uint8_t *buf, int len);
5fafdf24 865int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
8b1f24b0 866 uint8_t *buf, int len, int is_write);
13eb76e0 867
04c504cc
FB
868#define VGA_DIRTY_FLAG 0x01
869#define CODE_DIRTY_FLAG 0x02
0a962c02 870
1ccde1cb 871/* read dirty bit (return 0 or 1) */
04c504cc 872static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
1ccde1cb 873{
0a962c02
FB
874 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
875}
876
5fafdf24 877static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
0a962c02
FB
878 int dirty_flags)
879{
880 return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
1ccde1cb
FB
881}
882
04c504cc 883static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
1ccde1cb 884{
0a962c02 885 phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
1ccde1cb
FB
886}
887
04c504cc 888void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
0a962c02 889 int dirty_flags);
04c504cc 890void cpu_tlb_update_dirty(CPUState *env);
1ccde1cb 891
e3db7226
FB
892void dump_exec_info(FILE *f,
893 int (*cpu_fprintf)(FILE *f, const char *fmt, ...));
894
effedbc9
FB
895/*******************************************/
896/* host CPU ticks (if available) */
897
898#if defined(__powerpc__)
899
5fafdf24 900static inline uint32_t get_tbl(void)
effedbc9
FB
901{
902 uint32_t tbl;
903 asm volatile("mftb %0" : "=r" (tbl));
904 return tbl;
905}
906
5fafdf24 907static inline uint32_t get_tbu(void)
effedbc9
FB
908{
909 uint32_t tbl;
910 asm volatile("mftbu %0" : "=r" (tbl));
911 return tbl;
912}
913
914static inline int64_t cpu_get_real_ticks(void)
915{
916 uint32_t l, h, h1;
917 /* NOTE: we test if wrapping has occurred */
918 do {
919 h = get_tbu();
920 l = get_tbl();
921 h1 = get_tbu();
922 } while (h != h1);
923 return ((int64_t)h << 32) | l;
924}
925
926#elif defined(__i386__)
927
928static inline int64_t cpu_get_real_ticks(void)
5f1ce948
FB
929{
930 int64_t val;
931 asm volatile ("rdtsc" : "=A" (val));
932 return val;
933}
934
effedbc9
FB
935#elif defined(__x86_64__)
936
937static inline int64_t cpu_get_real_ticks(void)
938{
939 uint32_t low,high;
940 int64_t val;
941 asm volatile("rdtsc" : "=a" (low), "=d" (high));
942 val = high;
943 val <<= 32;
944 val |= low;
945 return val;
946}
947
948#elif defined(__ia64)
949
950static inline int64_t cpu_get_real_ticks(void)
951{
952 int64_t val;
953 asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
954 return val;
955}
956
957#elif defined(__s390__)
958
959static inline int64_t cpu_get_real_ticks(void)
960{
961 int64_t val;
962 asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
963 return val;
964}
965
3142255c 966#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)
effedbc9
FB
967
968static inline int64_t cpu_get_real_ticks (void)
969{
970#if defined(_LP64)
971 uint64_t rval;
972 asm volatile("rd %%tick,%0" : "=r"(rval));
973 return rval;
974#else
975 union {
976 uint64_t i64;
977 struct {
978 uint32_t high;
979 uint32_t low;
980 } i32;
981 } rval;
982 asm volatile("rd %%tick,%1; srlx %1,32,%0"
983 : "=r"(rval.i32.high), "=r"(rval.i32.low));
984 return rval.i64;
985#endif
986}
c4b89d18
TS
987
988#elif defined(__mips__)
989
990static inline int64_t cpu_get_real_ticks(void)
991{
992#if __mips_isa_rev >= 2
993 uint32_t count;
994 static uint32_t cyc_per_count = 0;
995
996 if (!cyc_per_count)
997 __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));
998
999 __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
1000 return (int64_t)(count * cyc_per_count);
1001#else
1002 /* FIXME */
1003 static int64_t ticks = 0;
1004 return ticks++;
1005#endif
1006}
1007
46152182
PB
1008#else
1009/* The host CPU doesn't have an easily accessible cycle counter.
85028e4d
TS
1010 Just return a monotonically increasing value. This will be
1011 totally wrong, but hopefully better than nothing. */
46152182
PB
1012static inline int64_t cpu_get_real_ticks (void)
1013{
1014 static int64_t ticks = 0;
1015 return ticks++;
1016}
effedbc9
FB
1017#endif
1018
1019/* profiling */
1020#ifdef CONFIG_PROFILER
1021static inline int64_t profile_getclock(void)
1022{
1023 return cpu_get_real_ticks();
1024}
1025
5f1ce948
FB
1026extern int64_t kqemu_time, kqemu_time_start;
1027extern int64_t qemu_time, qemu_time_start;
1028extern int64_t tlb_flush_time;
1029extern int64_t kqemu_exec_count;
1030extern int64_t dev_time;
1031extern int64_t kqemu_ret_int_count;
1032extern int64_t kqemu_ret_excp_count;
1033extern int64_t kqemu_ret_intr_count;
1034
1035#endif
1036
5a9fdfec 1037#endif /* CPU_ALL_H */
This page took 0.271249 seconds and 4 git commands to generate.