/* target/ppc/machine.c */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "sysemu/tcg.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "kvm_ppc.h"
#include "power8-pmu.h"

static void post_load_update_msr(CPUPPCState *env)
{
    target_ulong msr = env->msr;

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB before
     * restoring, so that ppc_store_msr() sees every supported bit as
     * changed and re-derives the dependent state.  Note that this
     * recomputes hflags.
     */
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);

    if (tcg_enabled()) {
        pmu_update_summaries(env);
    }
}

static int get_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get = get_avr,
    .put = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)
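
/*
 * VR0-VR31 alias the upper half of the unified VSX register file
 * (env->vsr[32..63]), which is why the sub-array above starts at
 * index 32.  Each register travels as two big-endian 64-bit words.
 */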

static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(0) = qemu_get_be64(f);

    return 0;
}

static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(0));
    return 0;
}

static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get = get_fpr,
    .put = put_fpr,
};

#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
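
/*
 * The FPRs occupy the most-significant doubleword (VsrD(0)) of
 * env->vsr[0..31], so the "fpr" stream carries only that half;
 * the "vsr" stream below supplies the other doubleword.
 */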

static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(1) = qemu_get_be64(f);

    return 0;
}

static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(1));
    return 0;
}

static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get = get_vsr,
    .put = put_vsr,
};

#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)
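
/*
 * Together the "fpr" (VsrD(0)) and "vsr" (VsrD(1)) streams reassemble
 * the full 128-bit VSR0-VSR31 on the destination.
 */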

static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}

#if defined(TARGET_PPC64)
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_3_0_migration;
}
#endif
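
/*
 * These predicates back the VMSTATE_*_TEST fields below: the legacy
 * fields are put on the wire only when the machine type requests
 * pre-2.8 (or pre-3.0) backward migration, so streams between current
 * versions stay free of them.
 */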

static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;
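
    /*
     * The two masks above freeze the insns_flags/insns_flags2 bits that
     * pre-2.8 QEMU knew about, so an older destination is never shown a
     * flag bit it cannot recognize.
     */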

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }
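
    /*
     * Each BAT is an upper/lower SPR pair, hence the 2 * i stride; BATs
     * 4-7, where implemented, live in the separate SPR_DBAT4U/SPR_IBAT4U
     * block.
     */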

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /*
         * Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream.
         */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        /*
         * CPU models supported by old machines all have
         * PPC_MEM_TLBIE, so we set it unconditionally to allow
         * backward migration from a POWER9 host to a POWER8 host.
         */
        cpu->mig_insns_flags |= PPC_MEM_TLBIE;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
    if (cpu->pre_3_0_migration) {
        if (cpu->hash64_opts) {
            cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
        }
    }

    /*
     * Retained for migration compatibility with pre-6.0 QEMU
     * for 601 machines.
     */
    env->hflags_compat_nmsr = 0;

    return 0;
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object.  For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match.  However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU.  The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode.  However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;
        int ret;

        cpu->compat_pvr = 0;
        ret = ppc_set_compat(cpu, compat_pvr, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -EINVAL;
        }
    }
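
    /*
     * Note that the incoming compat_pvr is re-applied through
     * ppc_set_compat() rather than trusted as loaded, so it gets
     * validated and propagated (e.g. to KVM); it is cleared first,
     * presumably so the setter starts from a clean state.
     */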

    /*
     * If we're running with KVM HV, there is a chance that the host
     * kernel cannot deal with a PVR other than the exact host PVR in
     * KVM_SET_SREGS.  If that happens, the guest freezes after
     * migration.
     *
     * kvmppc_pvr_workaround_required() performs this check: if the
     * kernel has the capability, no workaround is needed and it
     * returns false immediately.  Otherwise it checks whether we are
     * running under KVM PR.  If the kernel lacks the capability and we
     * are not on KVM PR (so we are on KVM HV), it returns true and we
     * must ensure that KVM_SET_SREGS receives the PVR it expects as a
     * workaround.
     */
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    post_load_update_msr(env);

    return 0;
}

static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_FLOAT;
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
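
/*
 * Optional-state pattern: a subsection's .needed hook decides at save
 * time whether the block goes on the wire at all; a destination that
 * does not receive it simply keeps its reset-time defaults.
 */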

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_ALTIVEC;
}

static int get_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    PowerPCCPU *cpu = opaque;
    ppc_store_vscr(&cpu->env, qemu_get_be32(f));
    return 0;
}

static int put_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    PowerPCCPU *cpu = opaque;
    qemu_put_be32(f, ppc_get_vscr(&cpu->env));
    return 0;
}

static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architecture value of the vscr, not the internally
         * expanded version.  Since this architecture value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping.  We want OFFSET=0 so that we effectively pass CPU
         * to the helper functions.
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags2 & PPC2_VSX;
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return FIELD_EX64(env->msr, MSR, TS);
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tm_needed,
    .fields = (VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif
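
/*
 * MSR[TS] is non-zero only while a transaction is active or suspended,
 * so the checkpointed state above is migrated only mid-transaction.
 */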

static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !mmu_is_64bit(cpu->env.mmu_model);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get = get_slbe,
    .put = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                                   \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return mmu_is_64bit(cpu->env.mmu_model);
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers
     */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

static const VMStateDescription vmstate_tlbemb = {
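    /*
     * The name below matches vmstate_tlb6xx instead of "cpu/tlbemb";
     * it presumably has to stay that way so existing migration streams
     * keep matching (renaming would break compatibility).
     */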
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Backward compatible internal state */
        VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU),

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};
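
/*
 * A sketch of how this description is consumed (the exact registration
 * point is an assumption): the PowerPC CPU class hands vmstate_ppc_cpu
 * to the migration core, which then calls cpu_pre_save() before
 * serializing the field list and cpu_post_load() once all fields and
 * subsections have been restored.
 */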