/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
 * Authors: Sanjay Lal <[email protected]>
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "internal.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "kvm_mips.h"
#include "exec/memattrs.h"

#define DEBUG_KVM 0

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static void kvm_mips_update_state(void *opaque, int running, RunState state);

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* MIPS has 128 signals */
    kvm_set_sigmask_len(s, 16);

    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);

    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_fpu_cap = 0;
            ret = 0;
        }
    }

    if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_msa_cap = 0;
            ret = 0;
        }
    }

    DPRINTF("%s\n", __func__);
    return ret;
}

int kvm_arch_destroy_vcpu(CPUState *cs)
{
    return 0;
}

void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        warn_report("KVM does not support FPU, disabling");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }
    if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        warn_report("KVM does not support MSA, disabling");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
    }

    DPRINTF("%s\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}
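
/*
 * Check QEMU's copy of CP0_Cause for a pending interrupt on IP2, the line
 * this KVM port uses for I/O interrupts.
 */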
static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}
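
/*
 * Runs just before entering the guest: if QEMU has a hard interrupt pending
 * and CP0_Cause.IP2 is set, inject guest IRQ 2 via KVM_INTERRUPT while
 * holding the iothread lock.
 */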
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    int r;
    struct kvm_mips_interrupt intr;

    qemu_mutex_lock_iothread();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_mips_io_interrupts_pending(cpu)) {
        intr.cpu = -1;
        intr.irq = 2;
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
        if (r < 0) {
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);
        }
    }

    qemu_mutex_unlock_iothread();
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret;

    DPRINTF("%s\n", __func__);
    switch (run->exit_reason) {
    default:
        error_report("%s: unknown exit reason %d",
                     __func__, run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    DPRINTF("%s\n", __func__);
    return true;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}
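
/*
 * Assert (level != 0) or deassert (level == 0) an interrupt line on a vCPU;
 * KVM encodes deassertion as a negative irq number.
 */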
int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = -1;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}
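
/*
 * Raise or lower an inter-processor interrupt: intr.cpu carries the
 * destination vCPU's cpu_index while the ioctl itself is issued on the
 * calling CPU.
 */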
int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = current_cpu;
    CPUState *dest_cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = dest_cs->cpu_index;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}
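
/*
 * KVM_GET/SET_ONE_REG identifiers for CP0 registers: the low bits encode
 * 8 * <register number> + <select>, combined with a register size flag.
 */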
#define MIPS_CP0_32(_R, _S)                                     \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)                                     \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)
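
/*
 * Thin wrappers around KVM_SET_ONE_REG / KVM_GET_ONE_REG for the register
 * widths used below; the target_ulong variants go through a 64-bit
 * temporary so the same code works on 32-bit and 64-bit targets.
 */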
static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    int ret;
    uint64_t val64 = 0;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
    if (ret >= 0) {
        *addr = val64;
    }
    return ret;
}

static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}
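
/* Writable bits of each Config register that QEMU will try to update in KVM */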
#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))
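
/*
 * Read-modify-write a 32-bit register so that only the bits covered by
 * @mask are brought in line with QEMU's value; all other bits keep
 * whatever KVM currently reports.
 */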
static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
                                          int32_t *addr, int32_t mask)
{
    int err;
    int32_t tmp, change;

    err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
    if (err < 0) {
        return err;
    }

    /* only change bits in mask */
    change = (*addr ^ tmp) & mask;
    if (!change) {
        return 0;
    }

    tmp = tmp ^ change;
    return kvm_mips_put_one_reg(cs, reg_id, &tmp);
}

/*
 * We freeze the KVM timer when either the VM clock is stopped or the state is
 * saved (the state is dirty).
 */

/*
 * Save the state of the KVM timer when VM clock is stopped or state is synced
 * to QEMU.
 */
static int kvm_mips_save_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err, ret = 0;

    /* freeze KVM timer */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

/*
 * Restore the state of the KVM timer when VM clock is restarted or state is
 * synced to KVM.
 */
static int kvm_mips_restore_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}

/*
 * Handle the VM clock being started or stopped
 */
static void kvm_mips_update_state(void *opaque, int running, RunState state)
{
    CPUState *cs = opaque;
    int ret;
    uint64_t count_resume;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!running) {
        if (!cs->vcpu_dirty) {
            ret = kvm_mips_save_count(cs);
            if (ret < 0) {
                warn_report("Failed saving count");
            }
        }
    } else {
        /* Set clock restore time to now */
        count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                      &count_resume);
        if (ret < 0) {
            warn_report("Failed setting COUNT_RESUME");
            return;
        }

        if (!cs->vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                warn_report("Failed restoring count");
            }
        }
    }
}
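
/*
 * Write QEMU's FPU and MSA state to KVM. Control registers that only change
 * on a full state sync are skipped unless level is KVM_PUT_FULL_STATE.
 */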
static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only put FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                        &env->active_fpu.fcr0);
            if (err < 0) {
                DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't put FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only put MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                       &env->msair);
            if (err < 0) {
                DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
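
/* Read the FPU and MSA state back from KVM into QEMU's CPU state. */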
static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't save
         * FPU registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}
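
/*
 * Write the CP0 registers to KVM, restoring the guest timer state too when
 * the VM clock is running.
 */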
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
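
/*
 * Read the CP0 registers from KVM, saving the guest timer state on the way
 * when the VM clock is running.
 */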
static int kvm_mips_get_cp0_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state was already saved when it was stopped */
    if (runstate_is_running()) {
        err = kvm_mips_save_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}
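
/* Push QEMU's full register state (GPRs, CP0, FPU/MSA) into KVM */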
int kvm_arch_put_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
    }

    regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
    regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
    regs.pc = (int64_t)(target_long)env->active_tc.PC;

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_cp0_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_fpu_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    return ret;
}
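
/* Pull the register state (GPRs, CP0, FPU/MSA) back from KVM into QEMU */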
int kvm_arch_get_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;
    struct kvm_regs regs;
    int i;

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs.gpr[i];
    }

    env->active_tc.HI[0] = regs.hi;
    env->active_tc.LO[0] = regs.lo;
    env->active_tc.PC = regs.pc;

    kvm_mips_get_cp0_registers(cs);
    kvm_mips_get_fpu_registers(cs);

    return ret;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}