#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "kvm_ppc.h"
#include "exec/helper-proto.h"

static void post_load_update_msr(CPUPPCState *env)
{
    target_ulong msr = env->msr;

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring. Note that this recomputes hflags and mem_idx.
     */
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);
}
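
/*
 * Illustrative sketch (comment only, not built): the XOR above flips
 * every maskable bit, so ppc_store_msr() sees them all as changed and
 * cannot skip its side effects.  With a hypothetical 4-bit mask:
 *
 *     msr_mask = 0b1010, saved msr = 0b1000
 *     env->msr ^= 0b1010;          // every maskable bit now differs
 *     ppc_store_msr(env, 0b1000);  // msr restored, hflags recomputed
 */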

static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    unsigned int i, j;
    target_ulong sdr1;
    uint32_t fpscr, vscr;
#if defined(TARGET_PPC64)
    int32_t slb_nr;
#endif
    target_ulong xer;

    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gpr[i]);
    }
#if !defined(TARGET_PPC64)
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gprh[i]);
    }
#endif
    qemu_get_betls(f, &env->lr);
    qemu_get_betls(f, &env->ctr);
    for (i = 0; i < 8; i++) {
        qemu_get_be32s(f, &env->crf[i]);
    }
    qemu_get_betls(f, &xer);
    cpu_write_xer(env, xer);
    qemu_get_betls(f, &env->reserve_addr);
    qemu_get_betls(f, &env->msr);
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->tgpr[i]);
    }
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.l = qemu_get_be64(f);
        *cpu_fpr_ptr(env, i) = u.d;
    }
    qemu_get_be32s(f, &fpscr);
    env->fpscr = fpscr;
    qemu_get_sbe32s(f, &env->access_type);
#if defined(TARGET_PPC64)
    qemu_get_betls(f, &env->spr[SPR_ASR]);
    qemu_get_sbe32s(f, &slb_nr);
#endif
    qemu_get_betls(f, &sdr1);
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->sr[i]);
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->DBAT[i][j]);
        }
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->IBAT[i][j]);
        }
    }
    qemu_get_sbe32s(f, &env->nb_tlb);
    qemu_get_sbe32s(f, &env->tlb_per_way);
    qemu_get_sbe32s(f, &env->nb_ways);
    qemu_get_sbe32s(f, &env->last_way);
    qemu_get_sbe32s(f, &env->id_tlbs);
    qemu_get_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        /* XXX assumes 6xx */
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->pb[i]);
    }
    for (i = 0; i < 1024; i++) {
        qemu_get_betls(f, &env->spr[i]);
    }
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sdr1);
    }
    qemu_get_be32s(f, &vscr);
    helper_mtvscr(env, vscr);
    qemu_get_be64s(f, &env->spe_acc);
    qemu_get_be32s(f, &env->spe_fscr);
    qemu_get_betls(f, &env->msr_mask);
    qemu_get_be32s(f, &env->flags);
    qemu_get_sbe32s(f, &env->error_code);
    qemu_get_be32s(f, &env->pending_interrupts);
    qemu_get_be32s(f, &env->irq_input_state);
    for (i = 0; i < POWERPC_EXCP_NB; i++) {
        qemu_get_betls(f, &env->excp_vectors[i]);
    }
    qemu_get_betls(f, &env->excp_prefix);
    qemu_get_betls(f, &env->ivor_mask);
    qemu_get_betls(f, &env->ivpr_mask);
    qemu_get_betls(f, &env->hreset_vector);
    qemu_get_betls(f, &env->nip);
    qemu_get_sbetl(f); /* Discard unused hflags */
    qemu_get_sbetl(f); /* Discard unused hflags_nmsr */
    qemu_get_sbe32(f); /* Discard unused mmu_idx */
    qemu_get_sbe32(f); /* Discard unused power_mode */

    post_load_update_msr(env);

    return 0;
}
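
/*
 * Usage note (sketch): cpu_load_old() is reached only through
 * vmstate_ppc_cpu.load_state_old below, i.e. when the incoming "cpu"
 * section carries a version older than .minimum_version_id (5) but at
 * least .minimum_version_id_old (4).  Newer streams are decoded
 * field-by-field from the VMStateField table instead.
 */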

static int get_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get = get_avr,
    .put = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v) \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n) \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)

static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(0) = qemu_get_be64(f);

    return 0;
}

static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(0));
    return 0;
}

static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get = get_fpr,
    .put = put_fpr,
};

#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n) \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)

static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(1) = qemu_get_be64(f);

    return 0;
}

static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(1));
    return 0;
}

static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get = get_vsr,
    .put = put_vsr,
};

#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v) \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n) \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)
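
/*
 * Note on the split: a 128-bit VSR is migrated in two halves.  The
 * "cpu/fpu" subsection carries VsrD(0), the doubleword that aliases
 * the classic FPR, and "cpu/vsx" carries VsrD(1); restoring both
 * subsections rebuilds the full register file.
 */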

static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}

#if defined(TARGET_PPC64)
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_3_0_migration;
}
#endif

static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /*
         * Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream.
         */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        /*
         * CPU models supported by old machines all have
         * PPC_MEM_TLBIE, so we set it unconditionally to allow
         * backward migration from a POWER9 host to a POWER8 host.
         */
        cpu->mig_insns_flags |= PPC_MEM_TLBIE;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
    if (cpu->pre_3_0_migration) {
        if (cpu->hash64_opts) {
            cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
        }
    }

    /* Retain migration compatibility with pre-6.0 streams for 601 machines. */
    env->hflags_compat_nmsr = (env->flags & POWERPC_FLAG_HID0_LE
                               ? env->hflags & MSR_LE : 0);

    return 0;
}
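
/*
 * Sketch of the BAT<->SPR mapping used above and in cpu_post_load():
 * each BAT is an upper/lower SPR pair at consecutive numbers, so for
 * i = 1:
 *
 *     env->spr[SPR_DBAT0U + 2]     == DBAT1U == env->DBAT[0][1]
 *     env->spr[SPR_DBAT0U + 2 + 1] == DBAT1L == env->DBAT[1][1]
 *
 * BATs 4-7 live in a separate, non-contiguous SPR block starting at
 * SPR_DBAT4U, hence the second loop.
 */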

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object. For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match. However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU. The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode. However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}
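
/*
 * Illustrative sketch (assumed hook shape, not defined in this file):
 * a pvr_match() implementation typically ignores the chip-revision
 * bits, accepting e.g. any stepping of the same server chip:
 *
 *     static bool my_pvr_match(PowerPCCPUClass *pcc, uint32_t pvr)
 *     {
 *         return (pvr & CPU_POWERPC_POWER_SERVER_MASK)
 *                == (pcc->pvr & CPU_POWERPC_POWER_SERVER_MASK);
 *     }
 */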

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;
        int ret;

        cpu->compat_pvr = 0;
        ret = ppc_set_compat(cpu, compat_pvr, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            return ret;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -EINVAL;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the guest
     * is running with KVM HV and its kernel does not have the
     * capability of dealing with a PVR other than this exact host
     * PVR in KVM_SET_SREGS. If that happens, the guest freezes after
     * migration.
     *
     * The function kvmppc_pvr_workaround_required() does this
     * verification by first checking if the kernel has the cap,
     * returning true immediately if that is the case. Otherwise it
     * checks whether we're running in KVM PR. If the kernel does not
     * have the cap and we're not running KVM PR (so, it is running
     * KVM HV), we need to ensure that KVM_SET_SREGS will receive the
     * PVR it expects as a workaround.
     */
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    post_load_update_msr(env);

    return 0;
}

static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_FLOAT;
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_ALTIVEC;
}

static int get_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    PowerPCCPU *cpu = opaque;
    helper_mtvscr(&cpu->env, qemu_get_be32(f));
    return 0;
}

static int put_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    PowerPCCPU *cpu = opaque;
    qemu_put_be32(f, helper_mfvscr(&cpu->env));
    return 0;
}

static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architected value of the VSCR, not the internally
         * expanded version. Since this architected value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping. We want OFFSET=0 so that we effectively pass the
         * CPU to the helper functions.
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};
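
/*
 * Sketch of why OFFSET=0 works: vmstate passes (opaque + field->offset)
 * as the pv/opaque argument of the VMStateInfo callbacks.  With offset
 * 0, that pointer is the PowerPCCPU itself, which is why get_vscr()
 * and put_vscr() above can take a PowerPCCPU * instead of a pointer
 * to a stored uint32_t.
 */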

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags2 & PPC2_VSX;
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return msr_ts;
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif

static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !mmu_is_64bit(cpu->env.mmu_model);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, JSONWriter *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get = get_slbe,
    .put = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v) \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n) \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return mmu_is_64bit(cpu->env.mmu_model);
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers.
     */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}
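
/*
 * Usage note (sketch): ppc_store_slb() re-derives each entry's cached
 * page-size (sps) pointer from the vsid's segment-size and LP bits,
 * and rejects encodings the CPU model does not support, so a corrupt
 * stream fails here at load time rather than at first translation.
 */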

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];

    return (pvr & 0xffff0000) == 0x00200000;
}

static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};

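/*
 * Note: the section name below really is "cpu/tlb6xx", apparently a
 * copy-paste from vmstate_tlb6xx above.  Renaming it would change the
 * migration stream format, which is presumably why it has been left
 * alone.
 */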
static const VMStateDescription vmstate_tlbemb = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Backward compatible internal state */
        VMSTATE_UINTTL(env.hflags_compat_nmsr, PowerPCCPU),

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};
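
/*
 * Usage note (sketch; exact hookup lives outside this file and is
 * assumed): vmstate_ppc_cpu is registered as the CPU's
 * VMStateDescription during PPC CPU class init, so each vCPU is
 * saved as a "cpu" section.  Subsections are emitted only when their
 * .needed hook returns true, which is how e.g. a non-Altivec machine
 * keeps "cpu/altivec" out of its stream and stays migratable to
 * older QEMU versions.
 */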