#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "kvm_ppc.h"
#include "exec/helper-proto.h"

static void post_load_update_msr(CPUPPCState *env)
{
    target_ulong msr = env->msr;

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring. Note that this recomputes hflags and mem_idx.
     */
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);
}

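/*
 * Legacy loader for the old-style (pre-VMState) "cpu" section.  It is
 * only reached through .load_state_old in vmstate_ppc_cpu below, for
 * incoming streams older than section version 5.
 */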
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    unsigned int i, j;
    target_ulong sdr1;
    uint32_t fpscr, vscr;
#if defined(TARGET_PPC64)
    int32_t slb_nr;
#endif
    target_ulong xer;

    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gpr[i]);
    }
#if !defined(TARGET_PPC64)
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gprh[i]);
    }
#endif
    qemu_get_betls(f, &env->lr);
    qemu_get_betls(f, &env->ctr);
    for (i = 0; i < 8; i++) {
        qemu_get_be32s(f, &env->crf[i]);
    }
    qemu_get_betls(f, &xer);
    cpu_write_xer(env, xer);
    qemu_get_betls(f, &env->reserve_addr);
    qemu_get_betls(f, &env->msr);
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->tgpr[i]);
    }
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.l = qemu_get_be64(f);
        *cpu_fpr_ptr(env, i) = u.d;
    }
    qemu_get_be32s(f, &fpscr);
    env->fpscr = fpscr;
    qemu_get_sbe32s(f, &env->access_type);
#if defined(TARGET_PPC64)
    qemu_get_betls(f, &env->spr[SPR_ASR]);
    qemu_get_sbe32s(f, &slb_nr);
#endif
    qemu_get_betls(f, &sdr1);
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->sr[i]);
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->DBAT[i][j]);
        }
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->IBAT[i][j]);
        }
    }
    qemu_get_sbe32s(f, &env->nb_tlb);
    qemu_get_sbe32s(f, &env->tlb_per_way);
    qemu_get_sbe32s(f, &env->nb_ways);
    qemu_get_sbe32s(f, &env->last_way);
    qemu_get_sbe32s(f, &env->id_tlbs);
    qemu_get_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        /* XXX assumes 6xx */
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->pb[i]);
    }
    for (i = 0; i < 1024; i++) {
        qemu_get_betls(f, &env->spr[i]);
    }
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sdr1);
    }
    qemu_get_be32s(f, &vscr);
    helper_mtvscr(env, vscr);
    qemu_get_be64s(f, &env->spe_acc);
    qemu_get_be32s(f, &env->spe_fscr);
    qemu_get_betls(f, &env->msr_mask);
    qemu_get_be32s(f, &env->flags);
    qemu_get_sbe32s(f, &env->error_code);
    qemu_get_be32s(f, &env->pending_interrupts);
    qemu_get_be32s(f, &env->irq_input_state);
    for (i = 0; i < POWERPC_EXCP_NB; i++) {
        qemu_get_betls(f, &env->excp_vectors[i]);
    }
    qemu_get_betls(f, &env->excp_prefix);
    qemu_get_betls(f, &env->ivor_mask);
    qemu_get_betls(f, &env->ivpr_mask);
    qemu_get_betls(f, &env->hreset_vector);
    qemu_get_betls(f, &env->nip);
    qemu_get_sbetl(f); /* Discard unused hflags */
    qemu_get_sbetl(f); /* Discard unused hflags_nmsr */
    qemu_get_sbe32(f); /* Discard unused mmu_idx */
    qemu_get_sbe32(f); /* Discard unused power_mode */

    post_load_update_msr(env);

    return 0;
}

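/*
 * Accessors for the Altivec registers: each AVR travels in the stream
 * as two big-endian 64-bit halves.
 */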
static int get_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get = get_avr,
    .put = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)

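/*
 * FPRs, VSRs and AVRs all live in the same env->vsr[] array: the FP/VSX
 * registers occupy vsr[0..31] and the Altivec registers vsr[32..63]
 * (hence the start index of 32 in VMSTATE_AVR_ARRAY_V above).  The
 * "fpr" field carries doubleword 0 of each of the first 32 VSRs and the
 * "vsr" field carries doubleword 1.
 */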
static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(0) = qemu_get_be64(f);

    return 0;
}

static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(0));
    return 0;
}

static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get = get_fpr,
    .put = put_fpr,
};

#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)

static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(1) = qemu_get_be64(f);

    return 0;
}

static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(1));
    return 0;
}

static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get = get_vsr,
    .put = put_vsr,
};

#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)

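/*
 * Predicates used to reproduce the stream layout expected by older
 * machine types; the pre_2_8_migration / pre_3_0_migration flags are
 * typically set through machine-type compat properties.
 */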
static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}

#if defined(TARGET_PPC64)
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_3_0_migration;
}
#endif

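/*
 * pre_save: mirror the architected copies of LR, CTR, XER, CFAR,
 * SPEFSCR and the BATs back into env->spr[] so the generic SPR array
 * in the stream is current, then fill in the mig_* compatibility
 * fields expected by older machine types.
 */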
static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /*
         * Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream.
         */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        /*
         * CPU models supported by old machines all have
         * PPC_MEM_TLBIE, so we set it unconditionally to allow
         * backward migration from a POWER9 host to a POWER8 host.
         */
        cpu->mig_insns_flags |= PPC_MEM_TLBIE;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
    if (cpu->pre_3_0_migration) {
        if (cpu->hash64_opts) {
            cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
        }
    }

    /* Retain migration compatibility with pre-6.0 QEMU for 601 machines. */
    env->hflags_compat_nmsr = (env->flags & POWERPC_FLAG_HID0_LE
                               ? env->hflags & MSR_LE : 0);

    return 0;
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object. For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match. However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU. The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode. However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;
        int ret;

        cpu->compat_pvr = 0;
        ret = ppc_set_compat(cpu, compat_pvr, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -EINVAL;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the kernel
     * does not have the capability of dealing with a PVR other than
     * the exact host PVR in KVM_SET_SREGS. If that happens, the guest
     * freezes after migration.
     *
     * kvmppc_pvr_workaround_required() does this verification by first
     * checking if the kernel has the cap, returning true immediately
     * if that is the case. Otherwise, it checks if we're running in
     * KVM PR. If the kernel does not have the cap and we're not
     * running KVM-PR (so, it is running KVM-HV), we need to ensure
     * that KVM_SET_SREGS will receive the PVR it expects as a
     * workaround.
     */
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    post_load_update_msr(env);

    return 0;
}

static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_FLOAT;
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_ALTIVEC;
}

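/*
 * The VSCR is migrated as its architected 32-bit value;
 * helper_mfvscr()/helper_mtvscr() convert to and from the internally
 * expanded representation (see the note in vmstate_altivec below).
 */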
static int get_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    PowerPCCPU *cpu = opaque;
    helper_mtvscr(&cpu->env, qemu_get_be32(f));
    return 0;
}

static int put_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    PowerPCCPU *cpu = opaque;
    qemu_put_be32(f, helper_mfvscr(&cpu->env));
    return 0;
}

static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architecture value of the vscr, not the internally
         * expanded version. Since this architecture value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping. We want OFFSET=0 so that we effectively pass CPU
         * to the helper functions.
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags2 & PPC2_VSX;
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return msr_ts;
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif

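/*
 * Segment registers are only used by CPUs without a 64-bit MMU; 64-bit
 * hash MMU CPUs migrate the SLB instead (see vmstate_slb below).
 */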
static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !mmu_is_64bit(cpu->env.mmu_model);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get = get_slbe,
    .put = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                                   \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return mmu_is_64bit(cpu->env.mmu_model);
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers
     */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

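/*
 * Software TLB state: at most one of the tlb6xx / tlbemb / tlbmas
 * subsections is sent, selected by env->tlb_type in the *_needed hooks.
 */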
static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];

    return (pvr & 0xffff0000) == 0x00200000;
}

static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};

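/*
 * NB: the subsection name below is "cpu/tlb6xx", the same string used
 * by vmstate_tlb6xx above, rather than "cpu/tlbemb"; it is presumably
 * kept that way so that existing migration streams continue to match.
 */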
static const VMStateDescription vmstate_tlbemb = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

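/*
 * Top-level "cpu" section: version 5, with old-style streams down to
 * version 4 still accepted via cpu_load_old(). Optional state lives in
 * the subsections below, each gated by its .needed hook.
 */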
const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Backward compatible internal state */
        VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU),

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};