#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "qemu/main-loop.h"
#include "kvm_ppc.h"
#include "exec/helper-proto.h"

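/*
 * Legacy incoming migration: cpu_load_old() is wired up below as
 * vmstate_ppc_cpu.load_state_old and hand-parses the old-format
 * (version 4) stream field by field.
 */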
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    unsigned int i, j;
    target_ulong sdr1;
    uint32_t fpscr, vscr;
#if defined(TARGET_PPC64)
    int32_t slb_nr;
#endif
    target_ulong xer;

    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gpr[i]);
    }
#if !defined(TARGET_PPC64)
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->gprh[i]);
    }
#endif
    qemu_get_betls(f, &env->lr);
    qemu_get_betls(f, &env->ctr);
    for (i = 0; i < 8; i++) {
        qemu_get_be32s(f, &env->crf[i]);
    }
    qemu_get_betls(f, &xer);
    cpu_write_xer(env, xer);
    qemu_get_betls(f, &env->reserve_addr);
    qemu_get_betls(f, &env->msr);
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->tgpr[i]);
    }
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.l = qemu_get_be64(f);
        *cpu_fpr_ptr(env, i) = u.d;
    }
    qemu_get_be32s(f, &fpscr);
    env->fpscr = fpscr;
    qemu_get_sbe32s(f, &env->access_type);
#if defined(TARGET_PPC64)
    qemu_get_betls(f, &env->spr[SPR_ASR]);
    qemu_get_sbe32s(f, &slb_nr);
#endif
    qemu_get_betls(f, &sdr1);
    for (i = 0; i < 32; i++) {
        qemu_get_betls(f, &env->sr[i]);
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->DBAT[i][j]);
        }
    }
    for (i = 0; i < 2; i++) {
        for (j = 0; j < 8; j++) {
            qemu_get_betls(f, &env->IBAT[i][j]);
        }
    }
    qemu_get_sbe32s(f, &env->nb_tlb);
    qemu_get_sbe32s(f, &env->tlb_per_way);
    qemu_get_sbe32s(f, &env->nb_ways);
    qemu_get_sbe32s(f, &env->last_way);
    qemu_get_sbe32s(f, &env->id_tlbs);
    qemu_get_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        /* XXX assumes 6xx */
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++) {
        qemu_get_betls(f, &env->pb[i]);
    }
    for (i = 0; i < 1024; i++) {
        qemu_get_betls(f, &env->spr[i]);
    }
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sdr1);
    }
    qemu_get_be32s(f, &vscr);
    helper_mtvscr(env, vscr);
    qemu_get_be64s(f, &env->spe_acc);
    qemu_get_be32s(f, &env->spe_fscr);
    qemu_get_betls(f, &env->msr_mask);
    qemu_get_be32s(f, &env->flags);
    qemu_get_sbe32s(f, &env->error_code);
    qemu_get_be32s(f, &env->pending_interrupts);
    qemu_get_be32s(f, &env->irq_input_state);
    for (i = 0; i < POWERPC_EXCP_NB; i++) {
        qemu_get_betls(f, &env->excp_vectors[i]);
    }
    qemu_get_betls(f, &env->excp_prefix);
    qemu_get_betls(f, &env->ivor_mask);
    qemu_get_betls(f, &env->ivpr_mask);
    qemu_get_betls(f, &env->hreset_vector);
    qemu_get_betls(f, &env->nip);
    qemu_get_betls(f, &env->hflags);
    qemu_get_betls(f, &env->hflags_nmsr);
    qemu_get_sbe32(f); /* Discard unused mmu_idx */
    qemu_get_sbe32(f); /* Discard unused power_mode */

    /* Recompute mmu indices */
    hreg_compute_mem_idx(env);

    return 0;
}

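/*
 * Custom VMStateInfo for the 128-bit Altivec registers: each register
 * goes on the wire as two big-endian 64-bit halves.
 */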
static int get_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get = get_avr,
    .put = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 32, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)

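/*
 * The FPRs and the VSX registers share env->vsr: get_fpr()/put_fpr()
 * migrate doubleword 0 of each ppc_vsr_t (the architected FPR), while
 * get_vsr()/put_vsr() migrate doubleword 1, so the "fpu" and "vsx"
 * subsections together carry the full 128-bit VSX state.
 */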
static int get_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(0) = qemu_get_be64(f);

    return 0;
}

static int put_fpr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(0));
    return 0;
}

static const VMStateInfo vmstate_info_fpr = {
    .name = "fpr",
    .get = get_fpr,
    .put = put_fpr,
};

#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_fpr, ppc_vsr_t)

#define VMSTATE_FPR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)

static int get_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field)
{
    ppc_vsr_t *v = pv;

    v->VsrD(1) = qemu_get_be64(f);

    return 0;
}

static int put_vsr(QEMUFile *f, void *pv, size_t size,
                   const VMStateField *field, QJSON *vmdesc)
{
    ppc_vsr_t *v = pv;

    qemu_put_be64(f, v->VsrD(1));
    return 0;
}

static const VMStateInfo vmstate_info_vsr = {
    .name = "vsr",
    .get = get_vsr,
    .put = put_vsr,
};

#define VMSTATE_VSR_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_SUB_ARRAY(_f, _s, 0, _n, _v, vmstate_info_vsr, ppc_vsr_t)

#define VMSTATE_VSR_ARRAY(_f, _s, _n)                                   \
    VMSTATE_VSR_ARRAY_V(_f, _s, _n, 0)

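/*
 * Compatibility predicates used with VMSTATE_*_TEST() below: the
 * pre_2_8_migration / pre_3_0_migration flags are set for machine
 * types older than QEMU 2.8 / 3.0 and gate fields that are only kept
 * in the stream for backward migration compatibility.
 */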
static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}

#if defined(TARGET_PPC64)
static bool cpu_pre_3_0_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_3_0_migration;
}
#endif

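/*
 * Before saving, mirror the dedicated fields (LR, CTR, XER, CFAR,
 * SPEFSCR and the BATs) back into env->spr[], since the stream carries
 * them as part of the generic SPR array, and compute the mig_*
 * compatibility fields for old machine types.
 */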
static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2 * i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2 * i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2 * i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2 * i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2 * i] = env->DBAT[0][i + 4];
        env->spr[SPR_DBAT4U + 2 * i + 1] = env->DBAT[1][i + 4];
        env->spr[SPR_IBAT4U + 2 * i] = env->IBAT[0][i + 4];
        env->spr[SPR_IBAT4U + 2 * i + 1] = env->IBAT[1][i + 4];
    }

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /*
         * Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream.
         */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        /*
         * CPU models supported by old machines all have
         * PPC_MEM_TLBIE, so we set it unconditionally to allow
         * backward migration from a POWER9 host to a POWER8 host.
         */
        cpu->mig_insns_flags |= PPC_MEM_TLBIE;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }
    if (cpu->pre_3_0_migration) {
        if (cpu->hash64_opts) {
            cpu->mig_slb_nr = cpu->hash64_opts->slb_size;
        }
    }

    return 0;
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object. For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match. However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU. The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode. However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong msr;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;

        cpu->compat_pvr = 0;
        ppc_set_compat(cpu, compat_pvr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -1;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the kernel
     * does not have the capability of dealing with a PVR other than
     * this exact host PVR in KVM_SET_SREGS. If that happens, the
     * guest freezes after migration.
     *
     * kvmppc_pvr_workaround_required() performs this check: it looks
     * at whether the kernel has the cap and whether we're running in
     * KVM PR. If the cap is absent and we're not running KVM-PR (so,
     * it is running KVM-HV), we need to ensure that KVM_SET_SREGS will
     * receive the PVR it expects as a workaround.
     */
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2 * i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2 * i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2 * i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2 * i + 1];
    }
    for (i = 0; (i < 4) && ((i + 4) < env->nb_BATs); i++) {
        env->DBAT[0][i + 4] = env->spr[SPR_DBAT4U + 2 * i];
        env->DBAT[1][i + 4] = env->spr[SPR_DBAT4U + 2 * i + 1];
        env->IBAT[0][i + 4] = env->spr[SPR_IBAT4U + 2 * i];
        env->IBAT[1][i + 4] = env->spr[SPR_IBAT4U + 2 * i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring
     */
    msr = env->msr;
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);

    hreg_compute_mem_idx(env);

    return 0;
}

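/*
 * Optional subsections below each have a .needed callback, so e.g. the
 * FP, Altivec and VSX state is only transferred when the corresponding
 * facility is present in insns_flags/insns_flags2.
 */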
static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_FLOAT;
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FPR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags & PPC_ALTIVEC;
}

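/*
 * CPUPPCState keeps VSCR in an expanded internal form, so the
 * architected 32-bit value is rebuilt with helper_mfvscr() on save and
 * re-expanded with helper_mtvscr() on load.
 */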
static int get_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field)
{
    PowerPCCPU *cpu = opaque;
    helper_mtvscr(&cpu->env, qemu_get_be32(f));
    return 0;
}

static int put_vscr(QEMUFile *f, void *opaque, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    PowerPCCPU *cpu = opaque;
    qemu_put_be32(f, helper_mfvscr(&cpu->env));
    return 0;
}

static const VMStateInfo vmstate_vscr = {
    .name = "cpu/altivec/vscr",
    .get = get_vscr,
    .put = put_vscr,
};

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.vsr, PowerPCCPU, 32),
        /*
         * Save the architecture value of the vscr, not the internally
         * expanded version. Since this architecture value does not
         * exist in memory to be stored, this requires a bit of hoop
         * jumping. We want OFFSET=0 so that we effectively pass CPU
         * to the helper functions.
         */
        {
            .name = "vscr",
            .version_id = 0,
            .size = sizeof(uint32_t),
            .info = &vmstate_vscr,
            .flags = VMS_SINGLE,
            .offset = 0
        },
        VMSTATE_END_OF_LIST()
    },
};

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return cpu->env.insns_flags2 & PPC2_VSX;
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_VSR_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return msr_ts;
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField []) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif

static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !(cpu->env.mmu_model & POWERPC_MMU_64);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

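/*
 * SLB migration: only the raw esid/vsid pairs go on the wire;
 * slb_post_load() re-derives the per-entry page size information on
 * the destination via ppc_store_slb().
 */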
#ifdef TARGET_PPC64
static int get_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size,
                    const VMStateField *field, QJSON *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get = get_slbe,
    .put = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                             \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                                   \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return cpu->env.mmu_model & POWERPC_MMU_64;
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /*
     * We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers
     */
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_TEST(mig_slb_nr, PowerPCCPU, cpu_pre_3_0_migration),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

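/*
 * Software TLB state. The three layouts (6xx, embedded, BookE MAS) are
 * mutually exclusive; the matching subsection is selected through
 * env->tlb_type in the *_needed callbacks.
 */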
static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];

    return (pvr & 0xffff0000) == 0x00200000;
}

static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_tlbemb = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

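/*
 * The negotiated compatibility mode (compat_pvr) is migrated in its
 * own subsection; compat_needed() keeps it out of the stream for
 * pre-2.10 machine types and when no compat mode is active.
 */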
static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

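/*
 * Top-level CPU state description. Current QEMU writes version 5
 * streams; version 4 streams in the old format are still accepted
 * through cpu_load_old(), and all optional state lives in the
 * subsections listed at the end.
 */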
const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Internal state */
        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
        /* FIXME: access_type? */

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};