/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012-2014 Imagination Technologies Ltd.
 * Authors: Sanjay Lal <[email protected]>
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/cpus.h"
#include "kvm_mips.h"
#include "exec/memattrs.h"

#define DEBUG_KVM 0

#define DPRINTF(fmt, ...) \
    do { if (DEBUG_KVM) { fprintf(stderr, fmt, ## __VA_ARGS__); } } while (0)

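/*
 * Host capability flags probed once in kvm_arch_init(): non-zero when the
 * kernel advertises KVM_CAP_MIPS_FPU / KVM_CAP_MIPS_MSA.  They gate the
 * per-vcpu capability enables in kvm_arch_init_vcpu() and the feature
 * masking done in kvm_mips_reset_vcpu().
 */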
static int kvm_mips_fpu_cap;
static int kvm_mips_msa_cap;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static void kvm_mips_update_state(void *opaque, int running, RunState state);

unsigned long kvm_arch_vcpu_id(CPUState *cs)
{
    return cs->cpu_index;
}

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    /* MIPS has 128 signals */
    kvm_set_sigmask_len(s, 16);

    kvm_mips_fpu_cap = kvm_check_extension(s, KVM_CAP_MIPS_FPU);
    kvm_mips_msa_cap = kvm_check_extension(s, KVM_CAP_MIPS_MSA);

    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;

    qemu_add_vm_change_state_handler(kvm_mips_update_state, cs);

    if (kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_FPU, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_fpu_cap = 0;
            ret = 0;
        }
    }

    if (kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        ret = kvm_vcpu_enable_cap(cs, KVM_CAP_MIPS_MSA, 0, 0);
        if (ret < 0) {
            /* mark unsupported so it gets disabled on reset */
            kvm_mips_msa_cap = 0;
            ret = 0;
        }
    }

    DPRINTF("%s\n", __func__);
    return ret;
}

void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    if (!kvm_mips_fpu_cap && env->CP0_Config1 & (1 << CP0C1_FP)) {
        fprintf(stderr, "Warning: KVM does not support FPU, disabling\n");
        env->CP0_Config1 &= ~(1 << CP0C1_FP);
    }
    if (!kvm_mips_msa_cap && env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        fprintf(stderr, "Warning: KVM does not support MSA, disabling\n");
        env->CP0_Config3 &= ~(1 << CP0C3_MSAP);
    }

    DPRINTF("%s\n", __func__);
}

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    DPRINTF("%s\n", __func__);
    return 0;
}

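/*
 * Test whether the CP0_Cause.IP bit for hardware interrupt 2 is set;
 * kvm_arch_pre_run() below injects that interrupt into the guest as IRQ 2
 * through the KVM_INTERRUPT ioctl.
 */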
static inline int cpu_mips_io_interrupts_pending(MIPSCPU *cpu)
{
    CPUMIPSState *env = &cpu->env;

    return env->CP0_Cause & (0x1 << (2 + CP0Ca_IP));
}


void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    int r;
    struct kvm_mips_interrupt intr;

    qemu_mutex_lock_iothread();

    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            cpu_mips_io_interrupts_pending(cpu)) {
        intr.cpu = -1;
        intr.irq = 2;
        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
        if (r < 0) {
            error_report("%s: cpu %d: failed to inject IRQ %x",
                         __func__, cs->cpu_index, intr.irq);
        }
    }

    qemu_mutex_unlock_iothread();
}

MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    int ret;

    DPRINTF("%s\n", __func__);
    switch (run->exit_reason) {
    default:
        error_report("%s: unknown exit reason %d",
                     __func__, run->exit_reason);
        ret = -1;
        break;
    }

    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cs)
{
    DPRINTF("%s\n", __func__);
    return true;
}

void kvm_arch_init_irq_routing(KVMState *s)
{
}

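/*
 * Raise or lower a guest interrupt line through the KVM_INTERRUPT ioctl.
 * The sign of intr.irq encodes the level: a positive value asserts the line
 * and a negative value deasserts it.  kvm_mips_set_interrupt() targets the
 * vcpu itself (intr.cpu == -1), while kvm_mips_set_ipi_interrupt() names the
 * destination vcpu explicitly via its cpu_index.
 */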
int kvm_mips_set_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = -1;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

int kvm_mips_set_ipi_interrupt(MIPSCPU *cpu, int irq, int level)
{
    CPUState *cs = current_cpu;
    CPUState *dest_cs = CPU(cpu);
    struct kvm_mips_interrupt intr;

    if (!kvm_enabled()) {
        return 0;
    }

    intr.cpu = dest_cs->cpu_index;

    if (level) {
        intr.irq = irq;
    } else {
        intr.irq = -irq;
    }

    DPRINTF("%s: CPU %d, IRQ: %d\n", __func__, intr.cpu, intr.irq);

    kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);

    return 0;
}

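/*
 * CP0 registers are accessed through the one-reg API.  The register ID packs
 * the coprocessor-0 (reg, sel) pair into the low bits as 8 * reg + sel, with
 * the access size carried in the KVM_REG_SIZE_* flag.  For example,
 * KVM_REG_MIPS_CP0_STATUS below is MIPS_CP0_32(12, 0), i.e. index 96 with a
 * 32-bit access size.
 */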
#define MIPS_CP0_32(_R, _S)                                     \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)                                     \
    (KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))

#define KVM_REG_MIPS_CP0_INDEX          MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_CONTEXT        MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_USERLOCAL      MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_PAGEMASK       MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_WIRED          MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_HWRENA         MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR       MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_COUNT          MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI        MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE        MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS         MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_CAUSE          MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC            MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID           MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_CONFIG         MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1        MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2        MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3        MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4        MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5        MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_ERROREPC       MIPS_CP0_64(30, 0)

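/*
 * Thin wrappers around KVM_SET_ONE_REG / KVM_GET_ONE_REG.  Each variant only
 * differs in the C type it marshals: int32_t, uint32_t, target_ulong
 * (widened to 64 bits on the wire), int64_t or uint64_t.
 */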
static inline int kvm_mips_put_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    uint64_t val64 = *addr;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_put_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_reg(CPUState *cs, uint64_t reg_id,
                                       int32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg(CPUState *cs, uint64_t reg_id,
                                        uint32_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ulreg(CPUState *cs, uint64_t reg_id,
                                         target_ulong *addr)
{
    int ret;
    uint64_t val64 = 0;
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)&val64
    };

    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
    if (ret >= 0) {
        *addr = val64;
    }
    return ret;
}

static inline int kvm_mips_get_one_reg64(CPUState *cs, uint64_t reg_id,
                                         int64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

static inline int kvm_mips_get_one_ureg64(CPUState *cs, uint64_t reg_id,
                                          uint64_t *addr)
{
    struct kvm_one_reg cp0reg = {
        .id = reg_id,
        .addr = (uintptr_t)addr
    };

    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &cp0reg);
}

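/*
 * Writable bits of the Config registers that QEMU attempts to push into KVM.
 * kvm_mips_change_one_reg() below performs a read-modify-write so that only
 * the bits listed in these masks are ever changed.
 */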
#define KVM_REG_MIPS_CP0_CONFIG_MASK    (1U << CP0C0_M)
#define KVM_REG_MIPS_CP0_CONFIG1_MASK   ((1U << CP0C1_M) | \
                                         (1U << CP0C1_FP))
#define KVM_REG_MIPS_CP0_CONFIG2_MASK   (1U << CP0C2_M)
#define KVM_REG_MIPS_CP0_CONFIG3_MASK   ((1U << CP0C3_M) | \
                                         (1U << CP0C3_MSAP))
#define KVM_REG_MIPS_CP0_CONFIG4_MASK   (1U << CP0C4_M)
#define KVM_REG_MIPS_CP0_CONFIG5_MASK   ((1U << CP0C5_MSAEn) | \
                                         (1U << CP0C5_UFE) | \
                                         (1U << CP0C5_FRE) | \
                                         (1U << CP0C5_UFR))

static inline int kvm_mips_change_one_reg(CPUState *cs, uint64_t reg_id,
                                          int32_t *addr, int32_t mask)
{
    int err;
    int32_t tmp, change;

    err = kvm_mips_get_one_reg(cs, reg_id, &tmp);
    if (err < 0) {
        return err;
    }

    /* only change bits in mask */
    change = (*addr ^ tmp) & mask;
    if (!change) {
        return 0;
    }

    tmp = tmp ^ change;
    return kvm_mips_put_one_reg(cs, reg_id, &tmp);
}

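/*
 * Timer freeze protocol used below: setting COUNT_CTL.DC stops the guest
 * CP0_Count while CP0_Cause and CP0_Count are transferred.  On resume,
 * COUNT_RESUME is first set to the current host time so counting continues
 * from the point the VM was restarted, and COUNT_CTL.DC is cleared again.
 */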
/*
 * We freeze the KVM timer when either the VM clock is stopped or the state is
 * saved (the state is dirty).
 */

/*
 * Save the state of the KVM timer when VM clock is stopped or state is synced
 * to QEMU.
 */
static int kvm_mips_save_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err, ret = 0;

    /* freeze KVM timer */
    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err);
        ret = err;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* read CP0_Cause */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* read CP0_Count */
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

/*
 * Restore the state of the KVM timer when VM clock is restarted or state is
 * synced to KVM.
 */
static int kvm_mips_restore_count(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    uint64_t count_ctl;
    int err_dc, err, ret = 0;

    /* check the timer is frozen */
    err_dc = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
    if (err_dc < 0) {
        DPRINTF("%s: Failed to get COUNT_CTL (%d)\n", __func__, err_dc);
        ret = err_dc;
    } else if (!(count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) {
        /* freeze timer (sets COUNT_RESUME for us) */
        count_ctl |= KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=1 (%d)\n", __func__, err);
            ret = err;
        }
    }

    /* load CP0_Cause */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_CAUSE, &env->CP0_Cause);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CAUSE (%d)\n", __func__, err);
        ret = err;
    }

    /* load CP0_Count */
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COUNT, &env->CP0_Count);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COUNT (%d)\n", __func__, err);
        ret = err;
    }

    /* resume KVM timer */
    if (err_dc >= 0) {
        count_ctl &= ~KVM_REG_MIPS_COUNT_CTL_DC;
        err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_CTL, &count_ctl);
        if (err < 0) {
            DPRINTF("%s: Failed to set COUNT_CTL.DC=0 (%d)\n", __func__, err);
            ret = err;
        }
    }

    return ret;
}

/*
 * Handle the VM clock being started or stopped
 */
static void kvm_mips_update_state(void *opaque, int running, RunState state)
{
    CPUState *cs = opaque;
    int ret;
    uint64_t count_resume;

    /*
     * If state is already dirty (synced to QEMU) then the KVM timer state is
     * already saved and can be restored when it is synced back to KVM.
     */
    if (!running) {
        if (!cs->vcpu_dirty) {
            ret = kvm_mips_save_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed saving count\n");
            }
        }
    } else {
        /* Set clock restore time to now */
        count_resume = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        ret = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_COUNT_RESUME,
                                      &count_resume);
        if (ret < 0) {
            fprintf(stderr, "Failed setting COUNT_RESUME\n");
            return;
        }

        if (!cs->vcpu_dirty) {
            ret = kvm_mips_restore_count(cs);
            if (ret < 0) {
                fprintf(stderr, "Failed restoring count\n");
            }
        }
    }
}

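/*
 * FPU and MSA state transfer.  Status.FR selects how the FP registers are
 * exposed: with FR=1 each FPR is a full 64-bit register (KVM_REG_MIPS_FPR_64),
 * with FR=0 they are accessed as 32-bit registers (KVM_REG_MIPS_FPR_32).
 * When MSA is present the FP registers alias the low bits of the 128-bit
 * vector registers, so only the vector registers are transferred.
 */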
static int kvm_mips_put_fpu_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only put FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                        &env->active_fpu.fcr0);
            if (err < 0) {
                DPRINTF("%s: Failed to put FCR_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to put FCR_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't put FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_put_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_put_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to put FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only put MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        if (level == KVM_PUT_FULL_STATE) {
            err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                       &env->msair);
            if (err < 0) {
                DPRINTF("%s: Failed to put MSA_IR (%d)\n", __func__, err);
                ret = err;
            }
        }
        err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to put MSA_CSR (%d)\n", __func__, err);
            ret = err;
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_put_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to put VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}

static int kvm_mips_get_fpu_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;
    unsigned int i;

    /* Only get FPU state if we're emulating a CPU with an FPU */
    if (env->CP0_Config1 & (1 << CP0C1_FP)) {
        /* FPU Control Registers */
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_IR,
                                    &env->active_fpu.fcr0);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FCR_CSR,
                                    &env->active_fpu.fcr31);
        if (err < 0) {
            DPRINTF("%s: Failed to get FCR_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_fp_status(env);
        }

        /*
         * FPU register state is a subset of MSA vector state, so don't get FPU
         * registers if we're emulating a CPU with MSA.
         */
        if (!(env->CP0_Config3 & (1 << CP0C3_MSAP))) {
            /* Floating point registers */
            for (i = 0; i < 32; ++i) {
                if (env->CP0_Status & (1 << CP0St_FR)) {
                    err = kvm_mips_get_one_ureg64(cs, KVM_REG_MIPS_FPR_64(i),
                                                  &env->active_fpu.fpr[i].d);
                } else {
                    err = kvm_mips_get_one_ureg(cs, KVM_REG_MIPS_FPR_32(i),
                                    &env->active_fpu.fpr[i].w[FP_ENDIAN_IDX]);
                }
                if (err < 0) {
                    DPRINTF("%s: Failed to get FPR%u (%d)\n", __func__, i, err);
                    ret = err;
                }
            }
        }
    }

    /* Only get MSA state if we're emulating a CPU with MSA */
    if (env->CP0_Config3 & (1 << CP0C3_MSAP)) {
        /* MSA Control Registers */
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_IR,
                                   &env->msair);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_IR (%d)\n", __func__, err);
            ret = err;
        }
        err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_MSA_CSR,
                                   &env->active_tc.msacsr);
        if (err < 0) {
            DPRINTF("%s: Failed to get MSA_CSR (%d)\n", __func__, err);
            ret = err;
        } else {
            restore_msa_fp_status(env);
        }

        /* Vector registers (includes FP registers) */
        for (i = 0; i < 32; ++i) {
            /* Big endian MSA not supported by QEMU yet anyway */
            err = kvm_mips_get_one_reg64(cs, KVM_REG_MIPS_VEC_128(i),
                                         env->active_fpu.fpr[i].wr.d);
            if (err < 0) {
                DPRINTF("%s: Failed to get VEC%u (%d)\n", __func__, i, err);
                ret = err;
            }
        }
    }

    return ret;
}

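/*
 * CP0 state transfer.  CP0_Count and CP0_Cause are not pushed or pulled
 * directly here: when the VM is running they go through
 * kvm_mips_restore_count() / kvm_mips_save_count() so the KVM timer is frozen
 * around the transfer; when the VM clock is stopped the timer state has
 * already been saved and will be restored when the clock restarts.
 */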
static int kvm_mips_put_cp0_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    (void)level;

    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state will be restored when it is restarted */
    if (runstate_is_running()) {
        err = kvm_mips_restore_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG,
                                  &env->CP0_Config0,
                                  KVM_REG_MIPS_CP0_CONFIG_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1,
                                  &env->CP0_Config1,
                                  KVM_REG_MIPS_CP0_CONFIG1_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2,
                                  &env->CP0_Config2,
                                  KVM_REG_MIPS_CP0_CONFIG2_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3,
                                  &env->CP0_Config3,
                                  KVM_REG_MIPS_CP0_CONFIG3_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4,
                                  &env->CP0_Config4,
                                  KVM_REG_MIPS_CP0_CONFIG4_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_change_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5,
                                  &env->CP0_Config5,
                                  KVM_REG_MIPS_CP0_CONFIG5_MASK);
    if (err < 0) {
        DPRINTF("%s: Failed to change CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_put_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to put CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

static int kvm_mips_get_cp0_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int err, ret = 0;

    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_INDEX, &env->CP0_Index);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_INDEX (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_CONTEXT,
                                 &env->CP0_Context);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONTEXT (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_USERLOCAL,
                                 &env->active_tc.CP0_UserLocal);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_USERLOCAL (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PAGEMASK,
                               &env->CP0_PageMask);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PAGEMASK (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_WIRED, &env->CP0_Wired);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_WIRED (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_HWRENA, &env->CP0_HWREna);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_HWRENA (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_BADVADDR,
                                 &env->CP0_BadVAddr);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_BADVADDR (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ENTRYHI,
                                 &env->CP0_EntryHi);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ENTRYHI (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_COMPARE,
                               &env->CP0_Compare);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_COMPARE (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_STATUS, &env->CP0_Status);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_STATUS (%d)\n", __func__, err);
        ret = err;
    }

    /* If VM clock stopped then state was already saved when it was stopped */
    if (runstate_is_running()) {
        err = kvm_mips_save_count(cs);
        if (err < 0) {
            ret = err;
        }
    }

    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_EPC, &env->CP0_EPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_EPC (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_PRID, &env->CP0_PRid);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_PRID (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG, &env->CP0_Config0);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG1, &env->CP0_Config1);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG1 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG2, &env->CP0_Config2);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG2 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG3, &env->CP0_Config3);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG3 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG4, &env->CP0_Config4);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG4 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_reg(cs, KVM_REG_MIPS_CP0_CONFIG5, &env->CP0_Config5);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_CONFIG5 (%d)\n", __func__, err);
        ret = err;
    }
    err = kvm_mips_get_one_ulreg(cs, KVM_REG_MIPS_CP0_ERROREPC,
                                 &env->CP0_ErrorEPC);
    if (err < 0) {
        DPRINTF("%s: Failed to get CP0_ERROREPC (%d)\n", __func__, err);
        ret = err;
    }

    return ret;
}

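/*
 * Full register synchronisation with KVM.  The general-purpose registers,
 * HI/LO and PC are transferred through the kvm_regs structure, whose fields
 * are 64-bit, so 32-bit target values are sign-extended on the way in (the
 * (int64_t)(target_long) casts below).  CP0 and FPU/MSA state go through the
 * one-reg helpers above.
 */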
int kvm_arch_put_registers(CPUState *cs, int level)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    struct kvm_regs regs;
    int ret;
    int i;

    /* Set the registers based on QEMU's view of things */
    for (i = 0; i < 32; i++) {
        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
    }

    regs.hi = (int64_t)(target_long)env->active_tc.HI[0];
    regs.lo = (int64_t)(target_long)env->active_tc.LO[0];
    regs.pc = (int64_t)(target_long)env->active_tc.PC;

    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_cp0_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    ret = kvm_mips_put_fpu_registers(cs, level);
    if (ret < 0) {
        return ret;
    }

    return ret;
}

int kvm_arch_get_registers(CPUState *cs)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    int ret = 0;
    struct kvm_regs regs;
    int i;

    /* Get the current register set as KVM sees it */
    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);

    if (ret < 0) {
        return ret;
    }

    for (i = 0; i < 32; i++) {
        env->active_tc.gpr[i] = regs.gpr[i];
    }

    env->active_tc.HI[0] = regs.hi;
    env->active_tc.LO[0] = regs.lo;
    env->active_tc.PC = regs.pc;

    kvm_mips_get_cp0_registers(cs);
    kvm_mips_get_fpu_registers(cs);

    return ret;
}

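/*
 * MSI routing hooks.  No in-kernel irqchip is set up in this file, so the
 * fixup/add/release callbacks below are stubs that simply satisfy the generic
 * KVM code, and kvm_arch_msi_data_to_gsi() is not expected to be reached.
 */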
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}

int kvm_arch_release_virq_post(int virq)
{
    return 0;
}

int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}