#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "hw/hw.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "helper_regs.h"
#include "mmu-hash64.h"
#include "migration/cpu.h"
#include "qapi/error.h"
#include "kvm_ppc.h"

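/*
 * Loader for the legacy, hand-rolled migration format (stream
 * version_id 4 and earlier); hooked up as .load_state_old in
 * vmstate_ppc_cpu below.  Fields are read in a fixed order and the
 * derived MMU state is recomputed at the end.
 */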
static int cpu_load_old(QEMUFile *f, void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    unsigned int i, j;
    target_ulong sdr1;
    uint32_t fpscr;
    target_ulong xer;

    for (i = 0; i < 32; i++)
        qemu_get_betls(f, &env->gpr[i]);
#if !defined(TARGET_PPC64)
    for (i = 0; i < 32; i++)
        qemu_get_betls(f, &env->gprh[i]);
#endif
    qemu_get_betls(f, &env->lr);
    qemu_get_betls(f, &env->ctr);
    for (i = 0; i < 8; i++)
        qemu_get_be32s(f, &env->crf[i]);
    qemu_get_betls(f, &xer);
    cpu_write_xer(env, xer);
    qemu_get_betls(f, &env->reserve_addr);
    qemu_get_betls(f, &env->msr);
    for (i = 0; i < 4; i++)
        qemu_get_betls(f, &env->tgpr[i]);
    for (i = 0; i < 32; i++) {
        union {
            float64 d;
            uint64_t l;
        } u;
        u.l = qemu_get_be64(f);
        env->fpr[i] = u.d;
    }
    qemu_get_be32s(f, &fpscr);
    env->fpscr = fpscr;
    qemu_get_sbe32s(f, &env->access_type);
#if defined(TARGET_PPC64)
    qemu_get_betls(f, &env->spr[SPR_ASR]);
    qemu_get_sbe32s(f, &env->slb_nr);
#endif
    qemu_get_betls(f, &sdr1);
    for (i = 0; i < 32; i++)
        qemu_get_betls(f, &env->sr[i]);
    for (i = 0; i < 2; i++)
        for (j = 0; j < 8; j++)
            qemu_get_betls(f, &env->DBAT[i][j]);
    for (i = 0; i < 2; i++)
        for (j = 0; j < 8; j++)
            qemu_get_betls(f, &env->IBAT[i][j]);
    qemu_get_sbe32s(f, &env->nb_tlb);
    qemu_get_sbe32s(f, &env->tlb_per_way);
    qemu_get_sbe32s(f, &env->nb_ways);
    qemu_get_sbe32s(f, &env->last_way);
    qemu_get_sbe32s(f, &env->id_tlbs);
    qemu_get_sbe32s(f, &env->nb_pids);
    if (env->tlb.tlb6) {
        /* XXX assumes 6xx */
        for (i = 0; i < env->nb_tlb; i++) {
            qemu_get_betls(f, &env->tlb.tlb6[i].pte0);
            qemu_get_betls(f, &env->tlb.tlb6[i].pte1);
            qemu_get_betls(f, &env->tlb.tlb6[i].EPN);
        }
    }
    for (i = 0; i < 4; i++)
        qemu_get_betls(f, &env->pb[i]);
    for (i = 0; i < 1024; i++)
        qemu_get_betls(f, &env->spr[i]);
    if (!cpu->vhyp) {
        ppc_store_sdr1(env, sdr1);
    }
    qemu_get_be32s(f, &env->vscr);
    qemu_get_be64s(f, &env->spe_acc);
    qemu_get_be32s(f, &env->spe_fscr);
    qemu_get_betls(f, &env->msr_mask);
    qemu_get_be32s(f, &env->flags);
    qemu_get_sbe32s(f, &env->error_code);
    qemu_get_be32s(f, &env->pending_interrupts);
    qemu_get_be32s(f, &env->irq_input_state);
    for (i = 0; i < POWERPC_EXCP_NB; i++)
        qemu_get_betls(f, &env->excp_vectors[i]);
    qemu_get_betls(f, &env->excp_prefix);
    qemu_get_betls(f, &env->ivor_mask);
    qemu_get_betls(f, &env->ivpr_mask);
    qemu_get_betls(f, &env->hreset_vector);
    qemu_get_betls(f, &env->nip);
    qemu_get_betls(f, &env->hflags);
    qemu_get_betls(f, &env->hflags_nmsr);
    qemu_get_sbe32(f); /* Discard unused mmu_idx */
    qemu_get_sbe32(f); /* Discard unused power_mode */

    /* Recompute mmu indices */
    hreg_compute_mem_idx(env);

    return 0;
}

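/*
 * Custom VMStateInfo for Altivec registers: each 128-bit ppc_avr_t is
 * put on the wire as two big-endian 64-bit halves.
 */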
static int get_avr(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
    ppc_avr_t *v = pv;

    v->u64[0] = qemu_get_be64(f);
    v->u64[1] = qemu_get_be64(f);

    return 0;
}

static int put_avr(QEMUFile *f, void *pv, size_t size, VMStateField *field,
                   QJSON *vmdesc)
{
    ppc_avr_t *v = pv;

    qemu_put_be64(f, v->u64[0]);
    qemu_put_be64(f, v->u64[1]);
    return 0;
}

static const VMStateInfo vmstate_info_avr = {
    .name = "avr",
    .get = get_avr,
    .put = put_avr,
};

#define VMSTATE_AVR_ARRAY_V(_f, _s, _n, _v)                         \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_avr, ppc_avr_t)

#define VMSTATE_AVR_ARRAY(_f, _s, _n)                               \
    VMSTATE_AVR_ARRAY_V(_f, _s, _n, 0)

static bool cpu_pre_2_8_migration(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;

    return cpu->pre_2_8_migration;
}

static int cpu_pre_save(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t insns_compat_mask =
        PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB
        | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES
        | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | PPC_FLOAT_FRSQRTES
        | PPC_FLOAT_STFIWX | PPC_FLOAT_EXT
        | PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ
        | PPC_MEM_SYNC | PPC_MEM_EIEIO | PPC_MEM_TLBIE | PPC_MEM_TLBSYNC
        | PPC_64B | PPC_64BX | PPC_ALTIVEC
        | PPC_SEGMENT_64B | PPC_SLBI | PPC_POPCNTB | PPC_POPCNTWD;
    uint64_t insns_compat_mask2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX
        | PPC2_PERM_ISA206 | PPC2_DIVE_ISA206
        | PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206
        | PPC2_FP_TST_ISA206 | PPC2_BCTAR_ISA207
        | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207
        | PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 | PPC2_TM;

    env->spr[SPR_LR] = env->lr;
    env->spr[SPR_CTR] = env->ctr;
    env->spr[SPR_XER] = cpu_read_xer(env);
#if defined(TARGET_PPC64)
    env->spr[SPR_CFAR] = env->cfar;
#endif
    env->spr[SPR_BOOKE_SPEFSCR] = env->spe_fscr;

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->spr[SPR_DBAT0U + 2*i] = env->DBAT[0][i];
        env->spr[SPR_DBAT0U + 2*i + 1] = env->DBAT[1][i];
        env->spr[SPR_IBAT0U + 2*i] = env->IBAT[0][i];
        env->spr[SPR_IBAT0U + 2*i + 1] = env->IBAT[1][i];
    }
    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
        env->spr[SPR_DBAT4U + 2*i] = env->DBAT[0][i+4];
        env->spr[SPR_DBAT4U + 2*i + 1] = env->DBAT[1][i+4];
        env->spr[SPR_IBAT4U + 2*i] = env->IBAT[0][i+4];
        env->spr[SPR_IBAT4U + 2*i + 1] = env->IBAT[1][i+4];
    }

    /* Hacks for migration compatibility between 2.6, 2.7 & 2.8 */
    if (cpu->pre_2_8_migration) {
        /* Mask out bits that got added to msr_mask since the versions
         * which stupidly included it in the migration stream. */
        target_ulong metamask = 0
#if defined(TARGET_PPC64)
            | (1ULL << MSR_TS0)
            | (1ULL << MSR_TS1)
#endif
            ;
        cpu->mig_msr_mask = env->msr_mask & ~metamask;
        cpu->mig_insns_flags = env->insns_flags & insns_compat_mask;
        cpu->mig_insns_flags2 = env->insns_flags2 & insns_compat_mask2;
        cpu->mig_nb_BATs = env->nb_BATs;
    }

    return 0;
}

/*
 * Determine if a given PVR is a "close enough" match to the CPU
 * object.  For TCG and KVM PR it would probably be sufficient to
 * require an exact PVR match.  However for KVM HV the user is
 * restricted to a PVR exactly matching the host CPU.  The correct way
 * to handle this is to put the guest into an architected
 * compatibility mode.  However, to allow a more forgiving transition
 * and migration from before this was widely done, we allow migration
 * between sufficiently similar PVRs, as determined by the CPU class's
 * pvr_match() hook.
 */
static bool pvr_match(PowerPCCPU *cpu, uint32_t pvr)
{
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (pvr == pcc->pvr) {
        return true;
    }
    return pcc->pvr_match(pcc, pvr);
}

static int cpu_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;
    target_ulong msr;

    /*
     * If we're operating in compat mode, we should be ok as long as
     * the destination supports the same compatibility mode.
     *
     * Otherwise, however, we require that the destination has exactly
     * the same CPU model as the source.
     */

#if defined(TARGET_PPC64)
    if (cpu->compat_pvr) {
        uint32_t compat_pvr = cpu->compat_pvr;
        Error *local_err = NULL;

        cpu->compat_pvr = 0;
        ppc_set_compat(cpu, compat_pvr, &local_err);
        if (local_err) {
            error_report_err(local_err);
            return -1;
        }
    } else
#endif
    {
        if (!pvr_match(cpu, env->spr[SPR_PVR])) {
            return -1;
        }
    }

    /*
     * If we're running with KVM HV, there is a chance that the guest
     * is running with KVM HV and its kernel does not have the
     * capability of dealing with a PVR other than this exact host
     * PVR in KVM_SET_SREGS.  If that happens, the guest freezes
     * after migration.
     *
     * The function kvmppc_pvr_workaround_required does this verification
     * by first checking if the kernel has the cap, returning true immediately
     * if that is the case.  Otherwise, it checks if we're running in KVM PR.
     * If the guest kernel does not have the cap and we're not running KVM-PR
     * (so, it is running KVM-HV), we need to ensure that KVM_SET_SREGS will
     * receive the PVR it expects as a workaround.
     */
#if defined(CONFIG_KVM)
    if (kvmppc_pvr_workaround_required(cpu)) {
        env->spr[SPR_PVR] = env->spr_cb[SPR_PVR].default_value;
    }
#endif

    env->lr = env->spr[SPR_LR];
    env->ctr = env->spr[SPR_CTR];
    cpu_write_xer(env, env->spr[SPR_XER]);
#if defined(TARGET_PPC64)
    env->cfar = env->spr[SPR_CFAR];
#endif
    env->spe_fscr = env->spr[SPR_BOOKE_SPEFSCR];

    for (i = 0; (i < 4) && (i < env->nb_BATs); i++) {
        env->DBAT[0][i] = env->spr[SPR_DBAT0U + 2*i];
        env->DBAT[1][i] = env->spr[SPR_DBAT0U + 2*i + 1];
        env->IBAT[0][i] = env->spr[SPR_IBAT0U + 2*i];
        env->IBAT[1][i] = env->spr[SPR_IBAT0U + 2*i + 1];
    }
    for (i = 0; (i < 4) && ((i+4) < env->nb_BATs); i++) {
        env->DBAT[0][i+4] = env->spr[SPR_DBAT4U + 2*i];
        env->DBAT[1][i+4] = env->spr[SPR_DBAT4U + 2*i + 1];
        env->IBAT[0][i+4] = env->spr[SPR_IBAT4U + 2*i];
        env->IBAT[1][i+4] = env->spr[SPR_IBAT4U + 2*i + 1];
    }

    if (!cpu->vhyp) {
        ppc_store_sdr1(env, env->spr[SPR_SDR1]);
    }

    /*
     * Invalidate all supported msr bits except MSR_TGPR/MSR_HVB
     * before restoring.
     */
    msr = env->msr;
    env->msr ^= env->msr_mask & ~((1ULL << MSR_TGPR) | MSR_HVB);
    ppc_store_msr(env, msr);

    hreg_compute_mem_idx(env);

    return 0;
}

static bool fpu_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags & PPC_FLOAT);
}

static const VMStateDescription vmstate_fpu = {
    .name = "cpu/fpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = fpu_needed,
    .fields = (VMStateField[]) {
        VMSTATE_FLOAT64_ARRAY(env.fpr, PowerPCCPU, 32),
        VMSTATE_UINTTL(env.fpscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool altivec_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags & PPC_ALTIVEC);
}

static const VMStateDescription vmstate_altivec = {
    .name = "cpu/altivec",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = altivec_needed,
    .fields = (VMStateField[]) {
        VMSTATE_AVR_ARRAY(env.avr, PowerPCCPU, 32),
        VMSTATE_UINT32(env.vscr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};

static bool vsx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    return (cpu->env.insns_flags2 & PPC2_VSX);
}

static const VMStateDescription vmstate_vsx = {
    .name = "cpu/vsx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vsx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_ARRAY(env.vsr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
/* Transactional memory state */
static bool tm_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    return msr_ts;
}

static const VMStateDescription vmstate_tm = {
    .name = "cpu/tm",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .needed = tm_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.tm_gpr, PowerPCCPU, 32),
        VMSTATE_AVR_ARRAY(env.tm_vsr, PowerPCCPU, 64),
        VMSTATE_UINT64(env.tm_cr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_lr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ctr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_fpscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_amr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_ppr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_vrsave, PowerPCCPU),
        VMSTATE_UINT32(env.tm_vscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_dscr, PowerPCCPU),
        VMSTATE_UINT64(env.tm_tar, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    },
};
#endif

static bool sr_needed(void *opaque)
{
#ifdef TARGET_PPC64
    PowerPCCPU *cpu = opaque;

    return !(cpu->env.mmu_model & POWERPC_MMU_64);
#else
    return true;
#endif
}

static const VMStateDescription vmstate_sr = {
    .name = "cpu/sr",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = sr_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.sr, PowerPCCPU, 32),
        VMSTATE_END_OF_LIST()
    },
};

#ifdef TARGET_PPC64
static int get_slbe(QEMUFile *f, void *pv, size_t size, VMStateField *field)
{
    ppc_slb_t *v = pv;

    v->esid = qemu_get_be64(f);
    v->vsid = qemu_get_be64(f);

    return 0;
}

static int put_slbe(QEMUFile *f, void *pv, size_t size, VMStateField *field,
                    QJSON *vmdesc)
{
    ppc_slb_t *v = pv;

    qemu_put_be64(f, v->esid);
    qemu_put_be64(f, v->vsid);
    return 0;
}

static const VMStateInfo vmstate_info_slbe = {
    .name = "slbe",
    .get = get_slbe,
    .put = put_slbe,
};

#define VMSTATE_SLB_ARRAY_V(_f, _s, _n, _v)                         \
    VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_slbe, ppc_slb_t)

#define VMSTATE_SLB_ARRAY(_f, _s, _n)                               \
    VMSTATE_SLB_ARRAY_V(_f, _s, _n, 0)

static bool slb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    /* We don't support any of the old segment table based 64-bit CPUs */
    return (cpu->env.mmu_model & POWERPC_MMU_64);
}

static int slb_post_load(void *opaque, int version_id)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;
    int i;

    /* We've pulled in the raw esid and vsid values from the migration
     * stream, but we need to recompute the page size pointers */
    for (i = 0; i < env->slb_nr; i++) {
        if (ppc_store_slb(cpu, i, env->slb[i].esid, env->slb[i].vsid) < 0) {
            /* Migration source had bad values in its SLB */
            return -1;
        }
    }

    return 0;
}

static const VMStateDescription vmstate_slb = {
    .name = "cpu/slb",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = slb_needed,
    .post_load = slb_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.slb_nr, PowerPCCPU, NULL),
        VMSTATE_SLB_ARRAY(env.slb, PowerPCCPU, MAX_SLB_ENTRIES),
        VMSTATE_END_OF_LIST()
    }
};
#endif /* TARGET_PPC64 */

static const VMStateDescription vmstate_tlb6xx_entry = {
    .name = "cpu/tlb6xx_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL(pte0, ppc6xx_tlb_t),
        VMSTATE_UINTTL(pte1, ppc6xx_tlb_t),
        VMSTATE_UINTTL(EPN, ppc6xx_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlb6xx_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_6XX);
}

static const VMStateDescription vmstate_tlb6xx = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlb6xx_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlb6, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlb6xx_entry,
                                            ppc6xx_tlb_t),
        VMSTATE_UINTTL_ARRAY(env.tgpr, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_tlbemb_entry = {
    .name = "cpu/tlbemb_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(RPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(EPN, ppcemb_tlb_t),
        VMSTATE_UINTTL(PID, ppcemb_tlb_t),
        VMSTATE_UINTTL(size, ppcemb_tlb_t),
        VMSTATE_UINT32(prot, ppcemb_tlb_t),
        VMSTATE_UINT32(attr, ppcemb_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbemb_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_EMB);
}

static bool pbr403_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    uint32_t pvr = cpu->env.spr[SPR_PVR];

    return (pvr & 0xffff0000) == 0x00200000;
}

static const VMStateDescription vmstate_pbr403 = {
    .name = "cpu/pbr403",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = pbr403_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINTTL_ARRAY(env.pb, PowerPCCPU, 4),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_tlbemb = {
    .name = "cpu/tlb6xx",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbemb_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbe, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbemb_entry,
                                            ppcemb_tlb_t),
        /* 403 protection registers */
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_pbr403,
        NULL
    }
};

static const VMStateDescription vmstate_tlbmas_entry = {
    .name = "cpu/tlbmas_entry",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(mas8, ppcmas_tlb_t),
        VMSTATE_UINT32(mas1, ppcmas_tlb_t),
        VMSTATE_UINT64(mas2, ppcmas_tlb_t),
        VMSTATE_UINT64(mas7_3, ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    },
};

static bool tlbmas_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;
    CPUPPCState *env = &cpu->env;

    return env->nb_tlb && (env->tlb_type == TLB_MAS);
}

static const VMStateDescription vmstate_tlbmas = {
    .name = "cpu/tlbmas",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = tlbmas_needed,
    .fields = (VMStateField[]) {
        VMSTATE_INT32_EQUAL(env.nb_tlb, PowerPCCPU, NULL),
        VMSTATE_STRUCT_VARRAY_POINTER_INT32(env.tlb.tlbm, PowerPCCPU,
                                            env.nb_tlb,
                                            vmstate_tlbmas_entry,
                                            ppcmas_tlb_t),
        VMSTATE_END_OF_LIST()
    }
};

static bool compat_needed(void *opaque)
{
    PowerPCCPU *cpu = opaque;

    assert(!(cpu->compat_pvr && !cpu->vhyp));
    return !cpu->pre_2_10_migration && cpu->compat_pvr != 0;
}

static const VMStateDescription vmstate_compat = {
    .name = "cpu/compat",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = compat_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(compat_pvr, PowerPCCPU),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_ppc_cpu = {
    .name = "cpu",
    .version_id = 5,
    .minimum_version_id = 5,
    .minimum_version_id_old = 4,
    .load_state_old = cpu_load_old,
    .pre_save = cpu_pre_save,
    .post_load = cpu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UNUSED(sizeof(target_ulong)), /* was _EQUAL(env.spr[SPR_PVR]) */

        /* User mode architected state */
        VMSTATE_UINTTL_ARRAY(env.gpr, PowerPCCPU, 32),
#if !defined(TARGET_PPC64)
        VMSTATE_UINTTL_ARRAY(env.gprh, PowerPCCPU, 32),
#endif
        VMSTATE_UINT32_ARRAY(env.crf, PowerPCCPU, 8),
        VMSTATE_UINTTL(env.nip, PowerPCCPU),

        /* SPRs */
        VMSTATE_UINTTL_ARRAY(env.spr, PowerPCCPU, 1024),
        VMSTATE_UINT64(env.spe_acc, PowerPCCPU),

        /* Reservation */
        VMSTATE_UINTTL(env.reserve_addr, PowerPCCPU),

        /* Supervisor mode architected state */
        VMSTATE_UINTTL(env.msr, PowerPCCPU),

        /* Internal state */
        VMSTATE_UINTTL(env.hflags_nmsr, PowerPCCPU),
        /* FIXME: access_type? */

        /* Sanity checking */
        VMSTATE_UINTTL_TEST(mig_msr_mask, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_UINT64_TEST(mig_insns_flags2, PowerPCCPU,
                            cpu_pre_2_8_migration),
        VMSTATE_UINT32_TEST(mig_nb_BATs, PowerPCCPU, cpu_pre_2_8_migration),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_fpu,
        &vmstate_altivec,
        &vmstate_vsx,
        &vmstate_sr,
#ifdef TARGET_PPC64
        &vmstate_tm,
        &vmstate_slb,
#endif /* TARGET_PPC64 */
        &vmstate_tlb6xx,
        &vmstate_tlbemb,
        &vmstate_tlbmas,
        &vmstate_compat,
        NULL
    }
};