Commit | Line | Data |
---|---|---|
0d75590d | 1 | #include "qemu/osdep.h" |
da34e65c | 2 | #include "qapi/error.h" |
b3946626 | 3 | #include "sysemu/hw_accel.h" |
9c17d615 | 4 | #include "sysemu/sysemu.h" |
03dd024f | 5 | #include "qemu/log.h" |
0b0b8310 | 6 | #include "qemu/error-report.h" |
9fdf0c29 | 7 | #include "cpu.h" |
63c91552 | 8 | #include "exec/exec-all.h" |
ed120055 | 9 | #include "helper_regs.h" |
0d09e41a | 10 | #include "hw/ppc/spapr.h" |
d5aea6f3 | 11 | #include "mmu-hash64.h" |
3794d548 AK | 12 | #include "cpu-models.h" |
3794d548 AK | 13 | #include "trace.h" |
3794d548 AK | 14 | #include "kvm_ppc.h" |
facdb8b6 | 15 | #include "hw/ppc/spapr_ovec.h" |
b4db5413 | 16 | #include "mmu-book3s-v3.h" |
f43e3525 | 17 | |
a46622fd | 18 | struct SPRSyncState { |
a46622fd AK | 19 | int spr; |
a46622fd AK | 20 | target_ulong value; |
a46622fd AK | 21 | target_ulong mask; |
a46622fd AK | 22 | }; |
a46622fd AK | 23 | |
14e6fe12 | 24 | static void do_spr_sync(CPUState *cs, run_on_cpu_data arg) |
a46622fd | 25 | { |
14e6fe12 | 26 | struct SPRSyncState *s = arg.host_ptr; |
e0eeb4a2 | 27 | PowerPCCPU *cpu = POWERPC_CPU(cs); |
a46622fd AK | 28 | CPUPPCState *env = &cpu->env; |
a46622fd AK | 29 | |
e0eeb4a2 | 30 | cpu_synchronize_state(cs); |
a46622fd AK | 31 | env->spr[s->spr] &= ~s->mask; |
a46622fd AK | 32 | env->spr[s->spr] |= s->value; |
a46622fd AK | 33 | } |
a46622fd AK | 34 | |
a46622fd AK | 35 | static void set_spr(CPUState *cs, int spr, target_ulong value, |
a46622fd AK | 36 | target_ulong mask) |
a46622fd AK | 37 | { |
a46622fd AK | 38 | struct SPRSyncState s = { |
a46622fd AK | 39 | .spr = spr, |
a46622fd AK | 40 | .value = value, |
a46622fd AK | 41 | .mask = mask |
a46622fd AK | 42 | }; |
14e6fe12 | 43 | run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s)); |
a46622fd AK | 44 | } |
a46622fd AK | 45 | |
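
A note on the helpers above: SPR updates are funnelled through run_on_cpu() so that do_spr_sync() executes on the target vCPU's own thread, and cpu_synchronize_state() pulls the latest register file from KVM before the write. The value/mask semantics are a plain read-modify-write. A minimal standalone sketch of that semantics follows; the LPCR_ILE bit position is an assumption made only for the example.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustration only: the same read-modify-write that do_spr_sync() applies
 * to env->spr[s->spr] -- clear the bits covered by 'mask', then OR in 'value'. */
static uint64_t spr_apply(uint64_t old, uint64_t value, uint64_t mask)
{
    return (old & ~mask) | value;
}

int main(void)
{
    const uint64_t LPCR_ILE = 1ULL << 25;   /* assumed bit position, illustration only */
    uint64_t lpcr = 0;

    /* set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE) -- H_SET_MODE little-endian path */
    lpcr = spr_apply(lpcr, LPCR_ILE, LPCR_ILE);
    printf("ILE on:  %#llx\n", (unsigned long long)lpcr);

    /* set_spr(cs, SPR_LPCR, 0, LPCR_ILE) -- back to big-endian */
    lpcr = spr_apply(lpcr, 0, LPCR_ILE);
    printf("ILE off: %#llx\n", (unsigned long long)lpcr);
    return 0;
}
```
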
af08a58f TH | 46 | static bool has_spr(PowerPCCPU *cpu, int spr) |
af08a58f TH | 47 | { |
af08a58f TH | 48 | /* We can test whether the SPR is defined by checking for a valid name */ |
af08a58f TH | 49 | return cpu->env.spr_cb[spr].name != NULL; |
af08a58f TH | 50 | } |
af08a58f TH | 51 | |
c6404ade | 52 | static inline bool valid_ptex(PowerPCCPU *cpu, target_ulong ptex) |
f3c75d42 AK | 53 | { |
f3c75d42 AK | 54 | /* |
36778660 | 55 | * hash value/pteg group index is normalized by HPT mask |
f3c75d42 | 56 | */ |
36778660 | 57 | if (((ptex & ~7ULL) / HPTES_PER_GROUP) & ~ppc_hash64_hpt_mask(cpu)) { |
f3c75d42 AK | 58 | return false; |
f3c75d42 AK | 59 | } |
f3c75d42 AK | 60 | return true; |
f3c75d42 AK | 61 | } |
f3c75d42 AK | 62 | |
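
valid_ptex() treats the low three bits of ptex as the slot within an 8-entry PTEG and checks that the remaining bits name a PTEG that actually exists in the hash table. A standalone worked example follows; the mask value corresponds to a 16 MiB HPT and is an assumption made only for the example.

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define HPTES_PER_GROUP 8

/* Same test as valid_ptex(), with the HPT mask passed in explicitly.
 * A 2^24-byte HPT holds 2^17 PTEGs, so its mask is 0x1ffff (assumed here). */
static bool ptex_in_range(uint64_t ptex, uint64_t hpt_mask)
{
    return !(((ptex & ~7ULL) / HPTES_PER_GROUP) & ~hpt_mask);
}

int main(void)
{
    uint64_t mask = 0x1ffff;
    printf("%d\n", ptex_in_range(8 * 0x1ffff + 7, mask));  /* 1: last slot of last group */
    printf("%d\n", ptex_in_range(8 * 0x20000, mask));      /* 0: one group past the end */
    return 0;
}
```
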
ecbc25fa DG | 63 | static bool is_ram_address(sPAPRMachineState *spapr, hwaddr addr) |
ecbc25fa DG | 64 | { |
ecbc25fa DG | 65 | MachineState *machine = MACHINE(spapr); |
ecbc25fa DG | 66 | MemoryHotplugState *hpms = &spapr->hotplug_memory; |
ecbc25fa DG | 67 | |
ecbc25fa DG | 68 | if (addr < machine->ram_size) { |
ecbc25fa DG | 69 | return true; |
ecbc25fa DG | 70 | } |
ecbc25fa DG | 71 | if ((addr >= hpms->base) |
ecbc25fa DG | 72 | && ((addr - hpms->base) < memory_region_size(&hpms->mr))) { |
ecbc25fa DG | 73 | return true; |
ecbc25fa DG | 74 | } |
ecbc25fa DG | 75 | |
ecbc25fa DG | 76 | return false; |
ecbc25fa DG | 77 | } |
ecbc25fa DG | 78 | |
28e02042 | 79 | static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
f43e3525 DG |
80 | target_ulong opcode, target_ulong *args) |
81 | { | |
82 | target_ulong flags = args[0]; | |
c6404ade | 83 | target_ulong ptex = args[1]; |
f43e3525 DG |
84 | target_ulong pteh = args[2]; |
85 | target_ulong ptel = args[3]; | |
1f0252e6 | 86 | unsigned apshift; |
f73a2575 | 87 | target_ulong raddr; |
c6404ade | 88 | target_ulong slot; |
7222b94a | 89 | const ppc_hash_pte64_t *hptes; |
f43e3525 | 90 | |
1f0252e6 | 91 | apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel); |
1114e712 DG |
92 | if (!apshift) { |
93 | /* Bad page size encoding */ | |
94 | return H_PARAMETER; | |
f43e3525 DG |
95 | } |
96 | ||
1114e712 | 97 | raddr = (ptel & HPTE64_R_RPN) & ~((1ULL << apshift) - 1); |
f43e3525 | 98 | |
ecbc25fa | 99 | if (is_ram_address(spapr, raddr)) { |
f73a2575 | 100 | /* Regular RAM - should have WIMG=0010 */ |
d5aea6f3 | 101 | if ((ptel & HPTE64_R_WIMG) != HPTE64_R_M) { |
f73a2575 DG |
102 | return H_PARAMETER; |
103 | } | |
104 | } else { | |
c1175907 | 105 | target_ulong wimg_flags; |
f73a2575 DG |
106 | /* Looks like an IO address */ |
107 | /* FIXME: What WIMG combinations could be sensible for IO? | |
108 | * For now we allow WIMG=010x, but are there others? */ | |
109 | /* FIXME: Should we check against registered IO addresses? */ | |
c1175907 AK |
110 | wimg_flags = (ptel & (HPTE64_R_W | HPTE64_R_I | HPTE64_R_M)); |
111 | ||
112 | if (wimg_flags != HPTE64_R_I && | |
113 | wimg_flags != (HPTE64_R_I | HPTE64_R_M)) { | |
f73a2575 DG |
114 | return H_PARAMETER; |
115 | } | |
f43e3525 | 116 | } |
f73a2575 | 117 | |
f43e3525 DG |
118 | pteh &= ~0x60ULL; |
119 | ||
c6404ade | 120 | if (!valid_ptex(cpu, ptex)) { |
f43e3525 DG |
121 | return H_PARAMETER; |
122 | } | |
7c43bca0 | 123 | |
c6404ade DG |
124 | slot = ptex & 7ULL; |
125 | ptex = ptex & ~7ULL; | |
126 | ||
f43e3525 | 127 | if (likely((flags & H_EXACT) == 0)) { |
7222b94a | 128 | hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP); |
c6404ade | 129 | for (slot = 0; slot < 8; slot++) { |
7222b94a | 130 | if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) { |
f43e3525 DG |
131 | break; |
132 | } | |
7aaf4957 | 133 | } |
7222b94a | 134 | ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP); |
c6404ade | 135 | if (slot == 8) { |
7aaf4957 AK |
136 | return H_PTEG_FULL; |
137 | } | |
f43e3525 | 138 | } else { |
7222b94a DG |
139 | hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1); |
140 | if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) { | |
141 | ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1); | |
f43e3525 DG |
142 | return H_PTEG_FULL; |
143 | } | |
7222b94a | 144 | ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1); |
f43e3525 | 145 | } |
7c43bca0 | 146 | |
c6404ade | 147 | ppc_hash64_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel); |
f43e3525 | 148 | |
c6404ade | 149 | args[0] = ptex + slot; |
f43e3525 DG |
150 | return H_SUCCESS; |
151 | } | |
152 | ||
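
The WIMG policy h_enter() enforces above is: real RAM must be mapped with exactly WIMG=0010 (memory coherence only), while anything that looks like I/O must be cache-inhibited, optionally with M also set. A standalone distillation of that check follows; the bit values mirror QEMU's HPTE64_R_* definitions and should be treated as assumptions here, the authoritative ones live in mmu-hash64.h.

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* WIMG bits in the low byte of the second HPTE doubleword (assumed values). */
#define HPTE_R_G  0x008ULL
#define HPTE_R_M  0x010ULL
#define HPTE_R_I  0x020ULL
#define HPTE_R_W  0x040ULL

/* Same policy as h_enter(): RAM must be exactly WIMG=0010, IO must be I or I|M. */
static bool wimg_ok(uint64_t ptel, bool is_ram)
{
    if (is_ram) {
        return (ptel & (HPTE_R_W | HPTE_R_I | HPTE_R_M | HPTE_R_G)) == HPTE_R_M;
    }
    return (ptel & (HPTE_R_W | HPTE_R_I | HPTE_R_M)) == HPTE_R_I ||
           (ptel & (HPTE_R_W | HPTE_R_I | HPTE_R_M)) == (HPTE_R_I | HPTE_R_M);
}

int main(void)
{
    printf("RAM, M only: %d\n", wimg_ok(HPTE_R_M, true));              /* 1 */
    printf("RAM, I set:  %d\n", wimg_ok(HPTE_R_I, true));              /* 0 */
    printf("IO,  I only: %d\n", wimg_ok(HPTE_R_I, false));             /* 1 */
    printf("IO,  W|I:    %d\n", wimg_ok(HPTE_R_W | HPTE_R_I, false));  /* 0 */
    return 0;
}
```
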
a3801402 | 153 | typedef enum { |
a3d0abae DG |
154 | REMOVE_SUCCESS = 0, |
155 | REMOVE_NOT_FOUND = 1, | |
156 | REMOVE_PARM = 2, | |
157 | REMOVE_HW = 3, | |
a3801402 | 158 | } RemoveResult; |
a3d0abae | 159 | |
7ef23068 | 160 | static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex, |
a3d0abae DG |
161 | target_ulong avpn, |
162 | target_ulong flags, | |
163 | target_ulong *vp, target_ulong *rp) | |
f43e3525 | 164 | { |
7222b94a | 165 | const ppc_hash_pte64_t *hptes; |
61a36c9b | 166 | target_ulong v, r; |
f43e3525 | 167 | |
c6404ade | 168 | if (!valid_ptex(cpu, ptex)) { |
a3d0abae | 169 | return REMOVE_PARM; |
f43e3525 DG |
170 | } |
171 | ||
7222b94a DG |
172 | hptes = ppc_hash64_map_hptes(cpu, ptex, 1); |
173 | v = ppc_hash64_hpte0(cpu, hptes, 0); | |
174 | r = ppc_hash64_hpte1(cpu, hptes, 0); | |
175 | ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1); | |
f43e3525 | 176 | |
d5aea6f3 | 177 | if ((v & HPTE64_V_VALID) == 0 || |
f43e3525 DG |
178 | ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) || |
179 | ((flags & H_ANDCOND) && (v & avpn) != 0)) { | |
a3d0abae | 180 | return REMOVE_NOT_FOUND; |
f43e3525 | 181 | } |
35f9304d | 182 | *vp = v; |
a3d0abae | 183 | *rp = r; |
7ef23068 | 184 | ppc_hash64_store_hpte(cpu, ptex, HPTE64_V_HPTE_DIRTY, 0); |
61a36c9b | 185 | ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r); |
a3d0abae DG |
186 | return REMOVE_SUCCESS; |
187 | } | |
188 | ||
28e02042 | 189 | static target_ulong h_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
a3d0abae DG |
190 | target_ulong opcode, target_ulong *args) |
191 | { | |
cd0c6f47 | 192 | CPUPPCState *env = &cpu->env; |
a3d0abae | 193 | target_ulong flags = args[0]; |
c6404ade | 194 | target_ulong ptex = args[1]; |
a3d0abae | 195 | target_ulong avpn = args[2]; |
a3801402 | 196 | RemoveResult ret; |
a3d0abae | 197 | |
c6404ade | 198 | ret = remove_hpte(cpu, ptex, avpn, flags, |
a3d0abae DG |
199 | &args[0], &args[1]); |
200 | ||
201 | switch (ret) { | |
202 | case REMOVE_SUCCESS: | |
e3cffe6f | 203 | check_tlb_flush(env, true); |
a3d0abae DG |
204 | return H_SUCCESS; |
205 | ||
206 | case REMOVE_NOT_FOUND: | |
207 | return H_NOT_FOUND; | |
208 | ||
209 | case REMOVE_PARM: | |
210 | return H_PARAMETER; | |
211 | ||
212 | case REMOVE_HW: | |
213 | return H_HARDWARE; | |
214 | } | |
215 | ||
9a39970d | 216 | g_assert_not_reached(); |
a3d0abae DG |
217 | } |
218 | ||
219 | #define H_BULK_REMOVE_TYPE 0xc000000000000000ULL | |
220 | #define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL | |
221 | #define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL | |
222 | #define H_BULK_REMOVE_END 0xc000000000000000ULL | |
223 | #define H_BULK_REMOVE_CODE 0x3000000000000000ULL | |
224 | #define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL | |
225 | #define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL | |
226 | #define H_BULK_REMOVE_PARM 0x2000000000000000ULL | |
227 | #define H_BULK_REMOVE_HW 0x3000000000000000ULL | |
228 | #define H_BULK_REMOVE_RC 0x0c00000000000000ULL | |
229 | #define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL | |
230 | #define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL | |
231 | #define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL | |
232 | #define H_BULK_REMOVE_AVPN 0x0200000000000000ULL | |
233 | #define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL | |
234 | ||
235 | #define H_BULK_REMOVE_MAX_BATCH 4 | |
236 | ||
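
Each of the four translation-specifier pairs handled by h_bulk_remove() below packs its type, flags and PTE index into the first doubleword, and the handler rewrites that doubleword in place as the response. A standalone sketch of the encode/decode, using only the constants defined above:

```c
#include <stdint.h>
#include <stdio.h>

#define H_BULK_REMOVE_TYPE     0xc000000000000000ULL
#define H_BULK_REMOVE_REQUEST  0x4000000000000000ULL
#define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL
#define H_BULK_REMOVE_CODE     0x3000000000000000ULL
#define H_BULK_REMOVE_FLAGS    0x0300000000000000ULL
#define H_BULK_REMOVE_AVPN     0x0200000000000000ULL
#define H_BULK_REMOVE_PTEX     0x00ffffffffffffffULL

int main(void)
{
    /* A request entry: type = REQUEST, AVPN flag set, ptex = 0x1234. */
    uint64_t tsh = H_BULK_REMOVE_REQUEST | H_BULK_REMOVE_AVPN | 0x1234;

    /* What h_bulk_remove() does to turn it into a response entry
     * (ret is the RemoveResult, 0..3, shifted into the CODE field). */
    uint64_t ret = 1; /* REMOVE_NOT_FOUND */
    tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
    tsh |= H_BULK_REMOVE_RESPONSE;
    tsh |= ret << 60;

    printf("type = %llx\n", (unsigned long long)((tsh & H_BULK_REMOVE_TYPE) >> 62)); /* 2 = RESPONSE */
    printf("code = %llx\n", (unsigned long long)((tsh & H_BULK_REMOVE_CODE) >> 60)); /* 1 = NOT_FOUND */
    printf("ptex = %llx\n", (unsigned long long)(tsh & H_BULK_REMOVE_PTEX));         /* 0x1234 */
    return 0;
}
```
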
28e02042 | 237 | static target_ulong h_bulk_remove(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
a3d0abae DG |
238 | target_ulong opcode, target_ulong *args) |
239 | { | |
cd0c6f47 | 240 | CPUPPCState *env = &cpu->env; |
a3d0abae | 241 | int i; |
cd0c6f47 | 242 | target_ulong rc = H_SUCCESS; |
a3d0abae DG |
243 | |
244 | for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { | |
245 | target_ulong *tsh = &args[i*2]; | |
246 | target_ulong tsl = args[i*2 + 1]; | |
247 | target_ulong v, r, ret; | |
248 | ||
249 | if ((*tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) { | |
250 | break; | |
251 | } else if ((*tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) { | |
252 | return H_PARAMETER; | |
253 | } | |
254 | ||
255 | *tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS; | |
256 | *tsh |= H_BULK_REMOVE_RESPONSE; | |
257 | ||
258 | if ((*tsh & H_BULK_REMOVE_ANDCOND) && (*tsh & H_BULK_REMOVE_AVPN)) { | |
259 | *tsh |= H_BULK_REMOVE_PARM; | |
260 | return H_PARAMETER; | |
261 | } | |
262 | ||
7ef23068 | 263 | ret = remove_hpte(cpu, *tsh & H_BULK_REMOVE_PTEX, tsl, |
a3d0abae DG |
264 | (*tsh & H_BULK_REMOVE_FLAGS) >> 26, |
265 | &v, &r); | |
266 | ||
267 | *tsh |= ret << 60; | |
268 | ||
269 | switch (ret) { | |
270 | case REMOVE_SUCCESS: | |
d5aea6f3 | 271 | *tsh |= (r & (HPTE64_R_C | HPTE64_R_R)) << 43; |
a3d0abae DG |
272 | break; |
273 | ||
274 | case REMOVE_PARM: | |
cd0c6f47 BH |
275 | rc = H_PARAMETER; |
276 | goto exit; | |
a3d0abae DG |
277 | |
278 | case REMOVE_HW: | |
cd0c6f47 BH |
279 | rc = H_HARDWARE; |
280 | goto exit; | |
a3d0abae DG |
281 | } |
282 | } | |
cd0c6f47 | 283 | exit: |
e3cffe6f | 284 | check_tlb_flush(env, true); |
a3d0abae | 285 | |
cd0c6f47 | 286 | return rc; |
f43e3525 DG |
287 | } |
288 | ||
28e02042 | 289 | static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
f43e3525 DG |
290 | target_ulong opcode, target_ulong *args) |
291 | { | |
b13ce26d | 292 | CPUPPCState *env = &cpu->env; |
f43e3525 | 293 | target_ulong flags = args[0]; |
c6404ade | 294 | target_ulong ptex = args[1]; |
f43e3525 | 295 | target_ulong avpn = args[2]; |
7222b94a | 296 | const ppc_hash_pte64_t *hptes; |
61a36c9b | 297 | target_ulong v, r; |
f43e3525 | 298 | |
c6404ade | 299 | if (!valid_ptex(cpu, ptex)) { |
f43e3525 DG |
300 | return H_PARAMETER; |
301 | } | |
302 | ||
7222b94a DG |
303 | hptes = ppc_hash64_map_hptes(cpu, ptex, 1); |
304 | v = ppc_hash64_hpte0(cpu, hptes, 0); | |
305 | r = ppc_hash64_hpte1(cpu, hptes, 0); | |
306 | ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1); | |
f43e3525 | 307 | |
d5aea6f3 | 308 | if ((v & HPTE64_V_VALID) == 0 || |
f43e3525 | 309 | ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) { |
f43e3525 DG |
310 | return H_NOT_FOUND; |
311 | } | |
312 | ||
d5aea6f3 DG |
313 | r &= ~(HPTE64_R_PP0 | HPTE64_R_PP | HPTE64_R_N | |
314 | HPTE64_R_KEY_HI | HPTE64_R_KEY_LO); | |
315 | r |= (flags << 55) & HPTE64_R_PP0; | |
316 | r |= (flags << 48) & HPTE64_R_KEY_HI; | |
317 | r |= flags & (HPTE64_R_PP | HPTE64_R_N | HPTE64_R_KEY_LO); | |
c6404ade | 318 | ppc_hash64_store_hpte(cpu, ptex, |
3f94170b | 319 | (v & ~HPTE64_V_VALID) | HPTE64_V_HPTE_DIRTY, 0); |
c6404ade | 320 | ppc_hash64_tlb_flush_hpte(cpu, ptex, v, r); |
d76ab5e1 ND |
321 | /* Flush the tlb */ |
322 | check_tlb_flush(env, true); | |
f43e3525 | 323 | /* Don't need a memory barrier, due to qemu's global lock */ |
c6404ade | 324 | ppc_hash64_store_hpte(cpu, ptex, v | HPTE64_V_HPTE_DIRTY, r); |
f43e3525 DG |
325 | return H_SUCCESS; |
326 | } | |
327 | ||
28e02042 | 328 | static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
6bbd5dde EC |
329 | target_ulong opcode, target_ulong *args) |
330 | { | |
6bbd5dde | 331 | target_ulong flags = args[0]; |
c6404ade | 332 | target_ulong ptex = args[1]; |
6bbd5dde EC |
333 | uint8_t *hpte; |
334 | int i, ridx, n_entries = 1; | |
335 | ||
c6404ade | 336 | if (!valid_ptex(cpu, ptex)) { |
6bbd5dde EC |
337 | return H_PARAMETER; |
338 | } | |
339 | ||
340 | if (flags & H_READ_4) { | |
341 | /* Clear the two low order bits */ | |
c6404ade | 342 | ptex &= ~(3ULL); |
6bbd5dde EC |
343 | n_entries = 4; |
344 | } | |
345 | ||
e57ca75c | 346 | hpte = spapr->htab + (ptex * HASH_PTE_SIZE_64); |
6bbd5dde EC |
347 | |
348 | for (i = 0, ridx = 0; i < n_entries; i++) { | |
349 | args[ridx++] = ldq_p(hpte); | |
350 | args[ridx++] = ldq_p(hpte + (HASH_PTE_SIZE_64/2)); | |
351 | hpte += HASH_PTE_SIZE_64; | |
352 | } | |
353 | ||
354 | return H_SUCCESS; | |
355 | } | |
356 | ||
0b0b8310 DG |
357 | struct sPAPRPendingHPT { |
358 | /* These fields are read-only after initialization */ | |
359 | int shift; | |
360 | QemuThread thread; | |
361 | ||
362 | /* These fields are protected by the BQL */ | |
363 | bool complete; | |
364 | ||
365 | /* These fields are private to the preparation thread if | |
366 | * !complete, otherwise protected by the BQL */ | |
367 | int ret; | |
368 | void *hpt; | |
369 | }; | |
370 | ||
371 | static void free_pending_hpt(sPAPRPendingHPT *pending) | |
372 | { | |
373 | if (pending->hpt) { | |
374 | qemu_vfree(pending->hpt); | |
375 | } | |
376 | ||
377 | g_free(pending); | |
378 | } | |
379 | ||
380 | static void *hpt_prepare_thread(void *opaque) | |
381 | { | |
382 | sPAPRPendingHPT *pending = opaque; | |
383 | size_t size = 1ULL << pending->shift; | |
384 | ||
385 | pending->hpt = qemu_memalign(size, size); | |
386 | if (pending->hpt) { | |
387 | memset(pending->hpt, 0, size); | |
388 | pending->ret = H_SUCCESS; | |
389 | } else { | |
390 | pending->ret = H_NO_MEM; | |
391 | } | |
392 | ||
393 | qemu_mutex_lock_iothread(); | |
394 | ||
395 | if (SPAPR_MACHINE(qdev_get_machine())->pending_hpt == pending) { | |
396 | /* Ready to go */ | |
397 | pending->complete = true; | |
398 | } else { | |
399 | /* We've been cancelled, clean ourselves up */ | |
400 | free_pending_hpt(pending); | |
401 | } | |
402 | ||
403 | qemu_mutex_unlock_iothread(); | |
404 | return NULL; | |
405 | } | |
406 | ||
407 | /* Must be called with BQL held */ | |
408 | static void cancel_hpt_prepare(sPAPRMachineState *spapr) | |
409 | { | |
410 | sPAPRPendingHPT *pending = spapr->pending_hpt; | |
411 | ||
412 | /* Let the thread know it's cancelled */ | |
413 | spapr->pending_hpt = NULL; | |
414 | ||
415 | if (!pending) { | |
416 | /* Nothing to do */ | |
417 | return; | |
418 | } | |
419 | ||
420 | if (!pending->complete) { | |
421 | /* thread will clean itself up */ | |
422 | return; | |
423 | } | |
424 | ||
425 | free_pending_hpt(pending); | |
426 | } | |
427 | ||
b55d295e DG |
428 | /* Convert a return code from the KVM ioctl()s implementing resize HPT |
429 | * into a PAPR hypercall return code */ | |
430 | static target_ulong resize_hpt_convert_rc(int ret) | |
431 | { | |
432 | if (ret >= 100000) { | |
433 | return H_LONG_BUSY_ORDER_100_SEC; | |
434 | } else if (ret >= 10000) { | |
435 | return H_LONG_BUSY_ORDER_10_SEC; | |
436 | } else if (ret >= 1000) { | |
437 | return H_LONG_BUSY_ORDER_1_SEC; | |
438 | } else if (ret >= 100) { | |
439 | return H_LONG_BUSY_ORDER_100_MSEC; | |
440 | } else if (ret >= 10) { | |
441 | return H_LONG_BUSY_ORDER_10_MSEC; | |
442 | } else if (ret > 0) { | |
443 | return H_LONG_BUSY_ORDER_1_MSEC; | |
444 | } | |
445 | ||
446 | switch (ret) { | |
447 | case 0: | |
448 | return H_SUCCESS; | |
449 | case -EPERM: | |
450 | return H_AUTHORITY; | |
451 | case -EINVAL: | |
452 | return H_PARAMETER; | |
453 | case -ENXIO: | |
454 | return H_CLOSED; | |
455 | case -ENOSPC: | |
456 | return H_PTEG_FULL; | |
457 | case -EBUSY: | |
458 | return H_BUSY; | |
459 | case -ENOMEM: | |
460 | return H_NO_MEM; | |
461 | default: | |
462 | return H_HARDWARE; | |
463 | } | |
464 | } | |
465 | ||
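
For reference, a few example mappings performed by resize_hpt_convert_rc(): positive ioctl returns are delay estimates in milliseconds that become H_LONG_BUSY_* hints, and negative errno values become PAPR errors. This sketch assumes it is compiled where the H_* constants and the function above are visible (e.g. within this file); it is not a standalone program.

```c
#include <errno.h>
#include <assert.h>

/* Usage sketch for resize_hpt_convert_rc(); assumes the surrounding file's
 * H_* definitions are in scope. */
static void resize_rc_examples(void)
{
    assert(resize_hpt_convert_rc(0) == H_SUCCESS);
    assert(resize_hpt_convert_rc(250) == H_LONG_BUSY_ORDER_100_MSEC);
    assert(resize_hpt_convert_rc(20000) == H_LONG_BUSY_ORDER_10_SEC);
    assert(resize_hpt_convert_rc(-ENOSPC) == H_PTEG_FULL);
    assert(resize_hpt_convert_rc(-EIO) == H_HARDWARE);   /* default case */
}
```
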
30f4b05b DG |
466 | static target_ulong h_resize_hpt_prepare(PowerPCCPU *cpu, |
467 | sPAPRMachineState *spapr, | |
468 | target_ulong opcode, | |
469 | target_ulong *args) | |
470 | { | |
471 | target_ulong flags = args[0]; | |
0b0b8310 DG |
472 | int shift = args[1]; |
473 | sPAPRPendingHPT *pending = spapr->pending_hpt; | |
db50f280 | 474 | uint64_t current_ram_size; |
b55d295e | 475 | int rc; |
30f4b05b DG |
476 | |
477 | if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { | |
478 | return H_AUTHORITY; | |
479 | } | |
480 | ||
0b0b8310 DG |
481 | if (!spapr->htab_shift) { |
482 | /* Radix guest, no HPT */ | |
483 | return H_NOT_AVAILABLE; | |
484 | } | |
485 | ||
30f4b05b | 486 | trace_spapr_h_resize_hpt_prepare(flags, shift); |
0b0b8310 DG |
487 | |
488 | if (flags != 0) { | |
489 | return H_PARAMETER; | |
490 | } | |
491 | ||
492 | if (shift && ((shift < 18) || (shift > 46))) { | |
493 | return H_PARAMETER; | |
494 | } | |
495 | ||
db50f280 | 496 | current_ram_size = MACHINE(spapr)->ram_size + get_plugged_memory_size(); |
0b0b8310 DG |
497 | |
498 | /* We only allow the guest to allocate an HPT one order above what | |
499 | * we'd normally give them (to stop a small guest claiming a huge | |
500 | * chunk of resources in the HPT) */ |
501 | if (shift > (spapr_hpt_shift_for_ramsize(current_ram_size) + 1)) { | |
502 | return H_RESOURCE; | |
503 | } | |
504 | ||
b55d295e DG |
505 | rc = kvmppc_resize_hpt_prepare(cpu, flags, shift); |
506 | if (rc != -ENOSYS) { | |
507 | return resize_hpt_convert_rc(rc); | |
508 | } | |
509 | ||
0b0b8310 DG |
510 | if (pending) { |
511 | /* something already in progress */ | |
512 | if (pending->shift == shift) { | |
513 | /* and it's suitable */ | |
514 | if (pending->complete) { | |
515 | return pending->ret; | |
516 | } else { | |
517 | return H_LONG_BUSY_ORDER_100_MSEC; | |
518 | } | |
519 | } | |
520 | ||
521 | /* not suitable, cancel and replace */ | |
522 | cancel_hpt_prepare(spapr); | |
523 | } | |
524 | ||
525 | if (!shift) { | |
526 | /* nothing to do */ | |
527 | return H_SUCCESS; | |
528 | } | |
529 | ||
530 | /* start new prepare */ | |
531 | ||
532 | pending = g_new0(sPAPRPendingHPT, 1); | |
533 | pending->shift = shift; | |
534 | pending->ret = H_HARDWARE; | |
535 | ||
536 | qemu_thread_create(&pending->thread, "sPAPR HPT prepare", | |
537 | hpt_prepare_thread, pending, QEMU_THREAD_DETACHED); | |
538 | ||
539 | spapr->pending_hpt = pending; | |
540 | ||
541 | /* In theory we could estimate the time more accurately based on | |
542 | * the new size, but there's not much point */ | |
543 | return H_LONG_BUSY_ORDER_100_MSEC; | |
544 | } | |
545 | ||
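
H_RESIZE_HPT_PREPARE is asynchronous: the allocation and zeroing of the new table happens in hpt_prepare_thread(), and the guest is expected to repeat the call until it stops getting a long-busy hint. A guest-side sketch follows; hv_call() is a hypothetical stand-in for the guest's hypercall entry point, not a QEMU or Linux API, so this is illustrative only.

```c
/* Hypothetical guest-side polling loop; hv_call() is a stand-in, not a real API.
 * h_resize_hpt_prepare() above returns H_LONG_BUSY_ORDER_100_MSEC while
 * hpt_prepare_thread() is still running. */
static long resize_prepare(unsigned long shift)
{
    long rc;

    do {
        rc = hv_call(H_RESIZE_HPT_PREPARE, 0 /* flags */, shift);
    } while (rc == H_LONG_BUSY_ORDER_100_MSEC);

    return rc;   /* H_SUCCESS once the new HPT has been allocated and zeroed */
}
```
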
546 | static uint64_t new_hpte_load0(void *htab, uint64_t pteg, int slot) | |
547 | { | |
548 | uint8_t *addr = htab; | |
549 | ||
550 | addr += pteg * HASH_PTEG_SIZE_64; | |
551 | addr += slot * HASH_PTE_SIZE_64; | |
552 | return ldq_p(addr); | |
553 | } | |
554 | ||
555 | static void new_hpte_store(void *htab, uint64_t pteg, int slot, | |
556 | uint64_t pte0, uint64_t pte1) | |
557 | { | |
558 | uint8_t *addr = htab; | |
559 | ||
560 | addr += pteg * HASH_PTEG_SIZE_64; | |
561 | addr += slot * HASH_PTE_SIZE_64; | |
562 | ||
563 | stq_p(addr, pte0); | |
564 | stq_p(addr + HASH_PTE_SIZE_64 / 2, pte1); | |
565 | } | |
566 | ||
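
new_hpte_load0()/new_hpte_store() address the new table directly by byte offset: 128 bytes per PTEG, i.e. eight 16-byte PTEs. A trivial standalone illustration of that arithmetic; the sizes mirror QEMU's HASH_PTE_SIZE_64/HASH_PTEG_SIZE_64 and are restated here as assumptions.

```c
#include <stdint.h>
#include <stdio.h>

#define HASH_PTE_SIZE_64   16
#define HASH_PTEG_SIZE_64  (HASH_PTE_SIZE_64 * 8)

/* Byte offset of a PTE inside the HPT, as used by new_hpte_load0()/new_hpte_store(). */
static uint64_t hpte_offset(uint64_t pteg, int slot)
{
    return pteg * HASH_PTEG_SIZE_64 + slot * HASH_PTE_SIZE_64;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)hpte_offset(2, 3)); /* 2*128 + 3*16 = 304 */
    return 0;
}
```
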
567 | static int rehash_hpte(PowerPCCPU *cpu, | |
568 | const ppc_hash_pte64_t *hptes, | |
569 | void *old_hpt, uint64_t oldsize, | |
570 | void *new_hpt, uint64_t newsize, | |
571 | uint64_t pteg, int slot) | |
572 | { | |
573 | uint64_t old_hash_mask = (oldsize >> 7) - 1; | |
574 | uint64_t new_hash_mask = (newsize >> 7) - 1; | |
575 | target_ulong pte0 = ppc_hash64_hpte0(cpu, hptes, slot); | |
576 | target_ulong pte1; | |
577 | uint64_t avpn; | |
578 | unsigned base_pg_shift; | |
579 | uint64_t hash, new_pteg, replace_pte0; | |
580 | ||
581 | if (!(pte0 & HPTE64_V_VALID) || !(pte0 & HPTE64_V_BOLTED)) { | |
582 | return H_SUCCESS; | |
583 | } | |
584 | ||
585 | pte1 = ppc_hash64_hpte1(cpu, hptes, slot); | |
586 | ||
587 | base_pg_shift = ppc_hash64_hpte_page_shift_noslb(cpu, pte0, pte1); | |
588 | assert(base_pg_shift); /* H_ENTER shouldn't allow a bad encoding */ | |
589 | avpn = HPTE64_V_AVPN_VAL(pte0) & ~(((1ULL << base_pg_shift) - 1) >> 23); | |
590 | ||
591 | if (pte0 & HPTE64_V_SECONDARY) { | |
592 | pteg = ~pteg; | |
593 | } | |
594 | ||
595 | if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_256M) { | |
596 | uint64_t offset, vsid; | |
597 | ||
598 | /* We only have 28 - 23 bits of offset in avpn */ | |
599 | offset = (avpn & 0x1f) << 23; | |
600 | vsid = avpn >> 5; | |
601 | /* We can find more bits from the pteg value */ | |
602 | if (base_pg_shift < 23) { | |
603 | offset |= ((vsid ^ pteg) & old_hash_mask) << base_pg_shift; | |
604 | } | |
605 | ||
606 | hash = vsid ^ (offset >> base_pg_shift); | |
607 | } else if ((pte0 & HPTE64_V_SSIZE) == HPTE64_V_SSIZE_1T) { | |
608 | uint64_t offset, vsid; | |
609 | ||
610 | /* We only have 40 - 23 bits of seg_off in avpn */ | |
611 | offset = (avpn & 0x1ffff) << 23; | |
612 | vsid = avpn >> 17; | |
613 | if (base_pg_shift < 23) { | |
614 | offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) | |
615 | << base_pg_shift; | |
616 | } | |
617 | ||
618 | hash = vsid ^ (vsid << 25) ^ (offset >> base_pg_shift); | |
619 | } else { | |
620 | error_report("rehash_pte: Bad segment size in HPTE"); | |
621 | return H_HARDWARE; | |
622 | } | |
623 | ||
624 | new_pteg = hash & new_hash_mask; | |
625 | if (pte0 & HPTE64_V_SECONDARY) { | |
626 | assert(~pteg == (hash & old_hash_mask)); | |
627 | new_pteg = ~new_pteg; | |
628 | } else { | |
629 | assert(pteg == (hash & old_hash_mask)); | |
630 | } | |
631 | assert((oldsize != newsize) || (pteg == new_pteg)); | |
632 | replace_pte0 = new_hpte_load0(new_hpt, new_pteg, slot); | |
633 | /* | |
634 | * Strictly speaking, we don't need all these tests, since we only | |
635 | * ever rehash bolted HPTEs. We might in future handle non-bolted | |
636 | * HPTEs, though so make the logic correct for those cases as | |
637 | * well. | |
638 | */ | |
639 | if (replace_pte0 & HPTE64_V_VALID) { | |
640 | assert(newsize < oldsize); | |
641 | if (replace_pte0 & HPTE64_V_BOLTED) { | |
642 | if (pte0 & HPTE64_V_BOLTED) { | |
643 | /* Bolted collision, nothing we can do */ | |
644 | return H_PTEG_FULL; | |
645 | } else { | |
646 | /* Discard this hpte */ | |
647 | return H_SUCCESS; | |
648 | } | |
649 | } | |
650 | } | |
651 | ||
652 | new_hpte_store(new_hpt, new_pteg, slot, pte0, pte1); | |
653 | return H_SUCCESS; | |
654 | } | |
655 | ||
656 | static int rehash_hpt(PowerPCCPU *cpu, | |
657 | void *old_hpt, uint64_t oldsize, | |
658 | void *new_hpt, uint64_t newsize) | |
659 | { | |
660 | uint64_t n_ptegs = oldsize >> 7; | |
661 | uint64_t pteg; | |
662 | int slot; | |
663 | int rc; | |
664 | ||
665 | for (pteg = 0; pteg < n_ptegs; pteg++) { | |
666 | hwaddr ptex = pteg * HPTES_PER_GROUP; | |
667 | const ppc_hash_pte64_t *hptes | |
668 | = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP); | |
669 | ||
670 | if (!hptes) { | |
671 | return H_HARDWARE; | |
672 | } | |
673 | ||
674 | for (slot = 0; slot < HPTES_PER_GROUP; slot++) { | |
675 | rc = rehash_hpte(cpu, hptes, old_hpt, oldsize, new_hpt, newsize, | |
676 | pteg, slot); | |
677 | if (rc != H_SUCCESS) { | |
678 | ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP); | |
679 | return rc; | |
680 | } | |
681 | } | |
682 | ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP); | |
683 | } | |
684 | ||
685 | return H_SUCCESS; | |
30f4b05b DG |
686 | } |
687 | ||
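
rehash_hpte() has to reconstruct the hash from the AVPN and the old PTEG index because the HPTE does not store the full VSID/offset. The two hash formulas it uses (256 MiB and 1 TiB segments) are reproduced standalone below; the PTEG mask in the example is an assumption for illustration.

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* The primary hash used by rehash_hpte() above, written out for a worked example.
 * 'vsid' and 'offset' are the segment id and the offset within the segment;
 * 'pg_shift' is the base page shift (12 for 4 KiB pages). */
static uint64_t hpt_primary_hash(uint64_t vsid, uint64_t offset,
                                 unsigned pg_shift, bool seg_1t)
{
    if (seg_1t) {
        return vsid ^ (vsid << 25) ^ (offset >> pg_shift);
    }
    return vsid ^ (offset >> pg_shift);
}

int main(void)
{
    /* With a 2^18-PTEG table the PTEG index is hash & 0x3ffff
     * (mask value chosen only for the example). */
    uint64_t hash = hpt_primary_hash(0x123456, 0x5000, 12, false);
    printf("pteg = %#llx\n", (unsigned long long)(hash & 0x3ffff));
    return 0;
}
```
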
1ec26c75 GK |
688 | static void do_push_sregs_to_kvm_pr(CPUState *cs, run_on_cpu_data data) |
689 | { | |
690 | int ret; | |
691 | ||
692 | cpu_synchronize_state(cs); | |
693 | ||
694 | ret = kvmppc_put_books_sregs(POWERPC_CPU(cs)); | |
695 | if (ret < 0) { | |
696 | error_report("failed to push sregs to KVM: %s", strerror(-ret)); | |
697 | exit(1); | |
698 | } | |
699 | } | |
700 | ||
701 | static void push_sregs_to_kvm_pr(sPAPRMachineState *spapr) | |
702 | { | |
703 | CPUState *cs; | |
704 | ||
705 | /* | |
706 | * This is a hack for the benefit of KVM PR - it abuses the SDR1 | |
707 | * slot in kvm_sregs to communicate the userspace address of the | |
708 | * HPT | |
709 | */ | |
710 | if (!kvm_enabled() || !spapr->htab) { | |
711 | return; | |
712 | } | |
713 | ||
714 | CPU_FOREACH(cs) { | |
715 | run_on_cpu(cs, do_push_sregs_to_kvm_pr, RUN_ON_CPU_NULL); | |
716 | } | |
717 | } | |
718 | ||
30f4b05b DG |
719 | static target_ulong h_resize_hpt_commit(PowerPCCPU *cpu, |
720 | sPAPRMachineState *spapr, | |
721 | target_ulong opcode, | |
722 | target_ulong *args) | |
723 | { | |
724 | target_ulong flags = args[0]; | |
725 | target_ulong shift = args[1]; | |
0b0b8310 DG |
726 | sPAPRPendingHPT *pending = spapr->pending_hpt; |
727 | int rc; | |
728 | size_t newsize; | |
30f4b05b DG |
729 | |
730 | if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DISABLED) { | |
731 | return H_AUTHORITY; | |
732 | } | |
733 | ||
94789567 DHB |
734 | if (!spapr->htab_shift) { |
735 | /* Radix guest, no HPT */ | |
736 | return H_NOT_AVAILABLE; | |
737 | } | |
738 | ||
30f4b05b | 739 | trace_spapr_h_resize_hpt_commit(flags, shift); |
0b0b8310 | 740 | |
b55d295e DG |
741 | rc = kvmppc_resize_hpt_commit(cpu, flags, shift); |
742 | if (rc != -ENOSYS) { | |
94789567 DHB |
743 | rc = resize_hpt_convert_rc(rc); |
744 | if (rc == H_SUCCESS) { | |
745 | /* Need to set the new htab_shift in the machine state */ | |
746 | spapr->htab_shift = shift; | |
747 | } | |
748 | return rc; | |
b55d295e DG |
749 | } |
750 | ||
0b0b8310 DG |
751 | if (flags != 0) { |
752 | return H_PARAMETER; | |
753 | } | |
754 | ||
755 | if (!pending || (pending->shift != shift)) { | |
756 | /* no matching prepare */ | |
757 | return H_CLOSED; | |
758 | } | |
759 | ||
760 | if (!pending->complete) { | |
761 | /* prepare has not completed */ | |
762 | return H_BUSY; | |
763 | } | |
764 | ||
765 | /* Shouldn't have got past PREPARE without an HPT */ | |
766 | g_assert(spapr->htab_shift); | |
767 | ||
768 | newsize = 1ULL << pending->shift; | |
769 | rc = rehash_hpt(cpu, spapr->htab, HTAB_SIZE(spapr), | |
770 | pending->hpt, newsize); | |
771 | if (rc == H_SUCCESS) { | |
772 | qemu_vfree(spapr->htab); | |
773 | spapr->htab = pending->hpt; | |
774 | spapr->htab_shift = pending->shift; | |
775 | ||
1ec26c75 | 776 | push_sregs_to_kvm_pr(spapr); |
b55d295e | 777 | |
0b0b8310 DG |
778 | pending->hpt = NULL; /* so it's not free()d */ |
779 | } | |
780 | ||
781 | /* Clean up */ | |
782 | spapr->pending_hpt = NULL; | |
783 | free_pending_hpt(pending); | |
784 | ||
785 | return rc; | |
30f4b05b DG |
786 | } |
787 | ||
423576f7 TH |
788 | static target_ulong h_set_sprg0(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
789 | target_ulong opcode, target_ulong *args) | |
790 | { | |
791 | cpu_synchronize_state(CPU(cpu)); | |
792 | cpu->env.spr[SPR_SPRG0] = args[0]; | |
793 | ||
794 | return H_SUCCESS; | |
795 | } | |
796 | ||
28e02042 | 797 | static target_ulong h_set_dabr(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
821303f5 DG |
798 | target_ulong opcode, target_ulong *args) |
799 | { | |
af08a58f TH |
800 | if (!has_spr(cpu, SPR_DABR)) { |
801 | return H_HARDWARE; /* DABR register not available */ | |
802 | } | |
803 | cpu_synchronize_state(CPU(cpu)); | |
804 | ||
805 | if (has_spr(cpu, SPR_DABRX)) { | |
806 | cpu->env.spr[SPR_DABRX] = 0x3; /* Use Problem and Privileged state */ | |
807 | } else if (!(args[0] & 0x4)) { /* Breakpoint Translation set? */ | |
808 | return H_RESERVED_DABR; | |
809 | } | |
810 | ||
811 | cpu->env.spr[SPR_DABR] = args[0]; | |
812 | return H_SUCCESS; | |
821303f5 DG |
813 | } |
814 | ||
e49ff266 TH |
815 | static target_ulong h_set_xdabr(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
816 | target_ulong opcode, target_ulong *args) | |
817 | { | |
818 | target_ulong dabrx = args[1]; | |
819 | ||
820 | if (!has_spr(cpu, SPR_DABR) || !has_spr(cpu, SPR_DABRX)) { | |
821 | return H_HARDWARE; | |
822 | } | |
823 | ||
824 | if ((dabrx & ~0xfULL) != 0 || (dabrx & H_DABRX_HYPERVISOR) != 0 | |
825 | || (dabrx & (H_DABRX_KERNEL | H_DABRX_USER)) == 0) { | |
826 | return H_PARAMETER; | |
827 | } | |
828 | ||
829 | cpu_synchronize_state(CPU(cpu)); | |
830 | cpu->env.spr[SPR_DABRX] = dabrx; | |
831 | cpu->env.spr[SPR_DABR] = args[0]; | |
832 | ||
833 | return H_SUCCESS; | |
834 | } | |
835 | ||
3240dd9a TH |
836 | static target_ulong h_page_init(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
837 | target_ulong opcode, target_ulong *args) | |
838 | { | |
839 | target_ulong flags = args[0]; | |
840 | hwaddr dst = args[1]; | |
841 | hwaddr src = args[2]; | |
842 | hwaddr len = TARGET_PAGE_SIZE; | |
843 | uint8_t *pdst, *psrc; | |
844 | target_long ret = H_SUCCESS; | |
845 | ||
846 | if (flags & ~(H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE | |
847 | | H_COPY_PAGE | H_ZERO_PAGE)) { | |
848 | qemu_log_mask(LOG_UNIMP, "h_page_init: Bad flags (" TARGET_FMT_lx ")\n", |
849 | flags); | |
850 | return H_PARAMETER; | |
851 | } | |
852 | ||
853 | /* Map-in destination */ | |
854 | if (!is_ram_address(spapr, dst) || (dst & ~TARGET_PAGE_MASK) != 0) { | |
855 | return H_PARAMETER; | |
856 | } | |
857 | pdst = cpu_physical_memory_map(dst, &len, 1); | |
858 | if (!pdst || len != TARGET_PAGE_SIZE) { | |
859 | return H_PARAMETER; | |
860 | } | |
861 | ||
862 | if (flags & H_COPY_PAGE) { | |
863 | /* Map-in source, copy to destination, and unmap source again */ | |
864 | if (!is_ram_address(spapr, src) || (src & ~TARGET_PAGE_MASK) != 0) { | |
865 | ret = H_PARAMETER; | |
866 | goto unmap_out; | |
867 | } | |
868 | psrc = cpu_physical_memory_map(src, &len, 0); | |
869 | if (!psrc || len != TARGET_PAGE_SIZE) { | |
870 | ret = H_PARAMETER; | |
871 | goto unmap_out; | |
872 | } | |
873 | memcpy(pdst, psrc, len); | |
874 | cpu_physical_memory_unmap(psrc, len, 0, len); | |
875 | } else if (flags & H_ZERO_PAGE) { | |
876 | memset(pdst, 0, len); /* Just clear the destination page */ | |
877 | } | |
878 | ||
879 | if (kvm_enabled() && (flags & H_ICACHE_SYNCHRONIZE) != 0) { | |
880 | kvmppc_dcbst_range(cpu, pdst, len); | |
881 | } | |
882 | if (flags & (H_ICACHE_SYNCHRONIZE | H_ICACHE_INVALIDATE)) { | |
883 | if (kvm_enabled()) { | |
884 | kvmppc_icbi_range(cpu, pdst, len); | |
885 | } else { | |
886 | tb_flush(CPU(cpu)); | |
887 | } | |
888 | } | |
889 | ||
890 | unmap_out: | |
891 | cpu_physical_memory_unmap(pdst, TARGET_PAGE_SIZE, 1, len); | |
892 | return ret; | |
893 | } | |
894 | ||
ed120055 DG |
895 | #define FLAGS_REGISTER_VPA 0x0000200000000000ULL |
896 | #define FLAGS_REGISTER_DTL 0x0000400000000000ULL | |
897 | #define FLAGS_REGISTER_SLBSHADOW 0x0000600000000000ULL | |
898 | #define FLAGS_DEREGISTER_VPA 0x0000a00000000000ULL | |
899 | #define FLAGS_DEREGISTER_DTL 0x0000c00000000000ULL | |
900 | #define FLAGS_DEREGISTER_SLBSHADOW 0x0000e00000000000ULL | |
901 | ||
902 | #define VPA_MIN_SIZE 640 | |
903 | #define VPA_SIZE_OFFSET 0x4 | |
904 | #define VPA_SHARED_PROC_OFFSET 0x9 | |
905 | #define VPA_SHARED_PROC_VAL 0x2 | |
906 | ||
e2684c0b | 907 | static target_ulong register_vpa(CPUPPCState *env, target_ulong vpa) |
ed120055 | 908 | { |
33276f1b | 909 | CPUState *cs = CPU(ppc_env_get_cpu(env)); |
ed120055 DG |
910 | uint16_t size; |
911 | uint8_t tmp; | |
912 | ||
913 | if (vpa == 0) { | |
914 | hcall_dprintf("Can't cope with registering a VPA at logical 0\n"); | |
915 | return H_HARDWARE; | |
916 | } | |
917 | ||
918 | if (vpa % env->dcache_line_size) { | |
919 | return H_PARAMETER; | |
920 | } | |
921 | /* FIXME: bounds check the address */ | |
922 | ||
41701aa4 | 923 | size = lduw_be_phys(cs->as, vpa + 0x4); |
ed120055 DG |
924 | |
925 | if (size < VPA_MIN_SIZE) { | |
926 | return H_PARAMETER; | |
927 | } | |
928 | ||
929 | /* VPA is not allowed to cross a page boundary */ | |
930 | if ((vpa / 4096) != ((vpa + size - 1) / 4096)) { | |
931 | return H_PARAMETER; | |
932 | } | |
933 | ||
1bfb37d1 | 934 | env->vpa_addr = vpa; |
ed120055 | 935 | |
2c17449b | 936 | tmp = ldub_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET); |
ed120055 | 937 | tmp |= VPA_SHARED_PROC_VAL; |
db3be60d | 938 | stb_phys(cs->as, env->vpa_addr + VPA_SHARED_PROC_OFFSET, tmp); |
ed120055 DG |
939 | |
940 | return H_SUCCESS; | |
941 | } | |
942 | ||
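
register_vpa() only accepts a VPA that is cache-line aligned, advertises at least VPA_MIN_SIZE bytes at offset 0x4, and does not straddle a 4 KiB boundary. A standalone restatement of those checks; the 128-byte cache-line size is an assumption made only for the example.

```c
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define VPA_MIN_SIZE 640

/* The address checks register_vpa() performs, pulled out for illustration. */
static bool vpa_addr_ok(uint64_t vpa, uint16_t size, unsigned dcache_line)
{
    if (vpa == 0 || (vpa % dcache_line) != 0) {
        return false;
    }
    if (size < VPA_MIN_SIZE) {
        return false;
    }
    /* VPA is not allowed to cross a page boundary */
    return (vpa / 4096) == ((vpa + size - 1) / 4096);
}

int main(void)
{
    printf("%d\n", vpa_addr_ok(0x10000, 640, 128));   /* 1 */
    printf("%d\n", vpa_addr_ok(0x10f80, 640, 128));   /* 0: crosses a 4 KiB boundary */
    return 0;
}
```
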
e2684c0b | 943 | static target_ulong deregister_vpa(CPUPPCState *env, target_ulong vpa) |
ed120055 | 944 | { |
1bfb37d1 | 945 | if (env->slb_shadow_addr) { |
ed120055 DG |
946 | return H_RESOURCE; |
947 | } | |
948 | ||
1bfb37d1 | 949 | if (env->dtl_addr) { |
ed120055 DG |
950 | return H_RESOURCE; |
951 | } | |
952 | ||
1bfb37d1 | 953 | env->vpa_addr = 0; |
ed120055 DG |
954 | return H_SUCCESS; |
955 | } | |
956 | ||
e2684c0b | 957 | static target_ulong register_slb_shadow(CPUPPCState *env, target_ulong addr) |
ed120055 | 958 | { |
33276f1b | 959 | CPUState *cs = CPU(ppc_env_get_cpu(env)); |
ed120055 DG |
960 | uint32_t size; |
961 | ||
962 | if (addr == 0) { | |
963 | hcall_dprintf("Can't cope with SLB shadow at logical 0\n"); | |
964 | return H_HARDWARE; | |
965 | } | |
966 | ||
fdfba1a2 | 967 | size = ldl_be_phys(cs->as, addr + 0x4); |
ed120055 DG |
968 | if (size < 0x8) { |
969 | return H_PARAMETER; | |
970 | } | |
971 | ||
972 | if ((addr / 4096) != ((addr + size - 1) / 4096)) { | |
973 | return H_PARAMETER; | |
974 | } | |
975 | ||
1bfb37d1 | 976 | if (!env->vpa_addr) { |
ed120055 DG |
977 | return H_RESOURCE; |
978 | } | |
979 | ||
1bfb37d1 DG |
980 | env->slb_shadow_addr = addr; |
981 | env->slb_shadow_size = size; | |
ed120055 DG |
982 | |
983 | return H_SUCCESS; | |
984 | } | |
985 | ||
e2684c0b | 986 | static target_ulong deregister_slb_shadow(CPUPPCState *env, target_ulong addr) |
ed120055 | 987 | { |
1bfb37d1 DG |
988 | env->slb_shadow_addr = 0; |
989 | env->slb_shadow_size = 0; | |
ed120055 DG |
990 | return H_SUCCESS; |
991 | } | |
992 | ||
e2684c0b | 993 | static target_ulong register_dtl(CPUPPCState *env, target_ulong addr) |
ed120055 | 994 | { |
33276f1b | 995 | CPUState *cs = CPU(ppc_env_get_cpu(env)); |
ed120055 DG |
996 | uint32_t size; |
997 | ||
998 | if (addr == 0) { | |
999 | hcall_dprintf("Can't cope with DTL at logical 0\n"); | |
1000 | return H_HARDWARE; | |
1001 | } | |
1002 | ||
fdfba1a2 | 1003 | size = ldl_be_phys(cs->as, addr + 0x4); |
ed120055 DG |
1004 | |
1005 | if (size < 48) { | |
1006 | return H_PARAMETER; | |
1007 | } | |
1008 | ||
1bfb37d1 | 1009 | if (!env->vpa_addr) { |
ed120055 DG |
1010 | return H_RESOURCE; |
1011 | } | |
1012 | ||
1bfb37d1 | 1013 | env->dtl_addr = addr; |
ed120055 DG |
1014 | env->dtl_size = size; |
1015 | ||
1016 | return H_SUCCESS; | |
1017 | } | |
1018 | ||
73f7821b | 1019 | static target_ulong deregister_dtl(CPUPPCState *env, target_ulong addr) |
ed120055 | 1020 | { |
1bfb37d1 | 1021 | env->dtl_addr = 0; |
ed120055 DG |
1022 | env->dtl_size = 0; |
1023 | ||
1024 | return H_SUCCESS; | |
1025 | } | |
1026 | ||
28e02042 | 1027 | static target_ulong h_register_vpa(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
ed120055 DG |
1028 | target_ulong opcode, target_ulong *args) |
1029 | { | |
1030 | target_ulong flags = args[0]; | |
1031 | target_ulong procno = args[1]; | |
1032 | target_ulong vpa = args[2]; | |
1033 | target_ulong ret = H_PARAMETER; | |
e2684c0b | 1034 | CPUPPCState *tenv; |
0f20ba62 | 1035 | PowerPCCPU *tcpu; |
ed120055 | 1036 | |
2e886fb3 | 1037 | tcpu = spapr_find_cpu(procno); |
5353d03d | 1038 | if (!tcpu) { |
ed120055 DG |
1039 | return H_PARAMETER; |
1040 | } | |
0f20ba62 | 1041 | tenv = &tcpu->env; |
ed120055 DG |
1042 | |
1043 | switch (flags) { | |
1044 | case FLAGS_REGISTER_VPA: | |
1045 | ret = register_vpa(tenv, vpa); | |
1046 | break; | |
1047 | ||
1048 | case FLAGS_DEREGISTER_VPA: | |
1049 | ret = deregister_vpa(tenv, vpa); | |
1050 | break; | |
1051 | ||
1052 | case FLAGS_REGISTER_SLBSHADOW: | |
1053 | ret = register_slb_shadow(tenv, vpa); | |
1054 | break; | |
1055 | ||
1056 | case FLAGS_DEREGISTER_SLBSHADOW: | |
1057 | ret = deregister_slb_shadow(tenv, vpa); | |
1058 | break; | |
1059 | ||
1060 | case FLAGS_REGISTER_DTL: | |
1061 | ret = register_dtl(tenv, vpa); | |
1062 | break; | |
1063 | ||
1064 | case FLAGS_DEREGISTER_DTL: | |
1065 | ret = deregister_dtl(tenv, vpa); | |
1066 | break; | |
1067 | } | |
1068 | ||
1069 | return ret; | |
1070 | } | |
1071 | ||
28e02042 | 1072 | static target_ulong h_cede(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
ed120055 DG |
1073 | target_ulong opcode, target_ulong *args) |
1074 | { | |
b13ce26d | 1075 | CPUPPCState *env = &cpu->env; |
fcd7d003 | 1076 | CPUState *cs = CPU(cpu); |
b13ce26d | 1077 | |
ed120055 DG |
1078 | env->msr |= (1ULL << MSR_EE); |
1079 | hreg_compute_hflags(env); | |
fcd7d003 | 1080 | if (!cpu_has_work(cs)) { |
259186a7 | 1081 | cs->halted = 1; |
27103424 | 1082 | cs->exception_index = EXCP_HLT; |
fcd7d003 | 1083 | cs->exit_request = 1; |
ed120055 DG |
1084 | } |
1085 | return H_SUCCESS; | |
1086 | } | |
1087 | ||
28e02042 | 1088 | static target_ulong h_rtas(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
39ac8455 DG |
1089 | target_ulong opcode, target_ulong *args) |
1090 | { | |
1091 | target_ulong rtas_r3 = args[0]; | |
4fe822e0 AK |
1092 | uint32_t token = rtas_ld(rtas_r3, 0); |
1093 | uint32_t nargs = rtas_ld(rtas_r3, 1); | |
1094 | uint32_t nret = rtas_ld(rtas_r3, 2); | |
39ac8455 | 1095 | |
210b580b | 1096 | return spapr_rtas_call(cpu, spapr, token, nargs, rtas_r3 + 12, |
39ac8455 DG |
1097 | nret, rtas_r3 + 12 + 4*nargs); |
1098 | } | |
1099 | ||
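
h_rtas() above simply forwards to spapr_rtas_call() after decoding the guest's RTAS argument buffer. Its layout, as implied by the rtas_ld() offsets used, is sketched here; the struct is illustrative only and not a type QEMU defines.

```c
#include <stdint.h>

/* Layout of the RTAS argument buffer at rtas_r3: three 32-bit header words
 * followed by nargs argument words and nret return words (all big-endian in
 * guest memory).  Sketch only, not a QEMU type. */
struct rtas_args {
    uint32_t token;      /* offset 0:  which RTAS call */
    uint32_t nargs;      /* offset 4:  number of argument words */
    uint32_t nret;       /* offset 8:  number of return words */
    uint32_t args[];     /* offset 12: nargs inputs, then nret outputs */
};
/* h_rtas() hands spapr_rtas_call() the argument area at rtas_r3 + 12 and the
 * return area at rtas_r3 + 12 + 4 * nargs, matching this layout. */
```
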
28e02042 | 1100 | static target_ulong h_logical_load(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
827200a2 DG |
1101 | target_ulong opcode, target_ulong *args) |
1102 | { | |
fdfba1a2 | 1103 | CPUState *cs = CPU(cpu); |
827200a2 DG |
1104 | target_ulong size = args[0]; |
1105 | target_ulong addr = args[1]; | |
1106 | ||
1107 | switch (size) { | |
1108 | case 1: | |
2c17449b | 1109 | args[0] = ldub_phys(cs->as, addr); |
827200a2 DG |
1110 | return H_SUCCESS; |
1111 | case 2: | |
41701aa4 | 1112 | args[0] = lduw_phys(cs->as, addr); |
827200a2 DG |
1113 | return H_SUCCESS; |
1114 | case 4: | |
fdfba1a2 | 1115 | args[0] = ldl_phys(cs->as, addr); |
827200a2 DG |
1116 | return H_SUCCESS; |
1117 | case 8: | |
2c17449b | 1118 | args[0] = ldq_phys(cs->as, addr); |
827200a2 DG |
1119 | return H_SUCCESS; |
1120 | } | |
1121 | return H_PARAMETER; | |
1122 | } | |
1123 | ||
28e02042 | 1124 | static target_ulong h_logical_store(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
827200a2 DG |
1125 | target_ulong opcode, target_ulong *args) |
1126 | { | |
f606604f EI |
1127 | CPUState *cs = CPU(cpu); |
1128 | ||
827200a2 DG |
1129 | target_ulong size = args[0]; |
1130 | target_ulong addr = args[1]; | |
1131 | target_ulong val = args[2]; | |
1132 | ||
1133 | switch (size) { | |
1134 | case 1: | |
db3be60d | 1135 | stb_phys(cs->as, addr, val); |
827200a2 DG |
1136 | return H_SUCCESS; |
1137 | case 2: | |
5ce5944d | 1138 | stw_phys(cs->as, addr, val); |
827200a2 DG |
1139 | return H_SUCCESS; |
1140 | case 4: | |
ab1da857 | 1141 | stl_phys(cs->as, addr, val); |
827200a2 DG |
1142 | return H_SUCCESS; |
1143 | case 8: | |
f606604f | 1144 | stq_phys(cs->as, addr, val); |
827200a2 DG |
1145 | return H_SUCCESS; |
1146 | } | |
1147 | return H_PARAMETER; | |
1148 | } | |
1149 | ||
28e02042 | 1150 | static target_ulong h_logical_memop(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
c73e3771 BH |
1151 | target_ulong opcode, target_ulong *args) |
1152 | { | |
fdfba1a2 EI |
1153 | CPUState *cs = CPU(cpu); |
1154 | ||
c73e3771 BH |
1155 | target_ulong dst = args[0]; /* Destination address */ |
1156 | target_ulong src = args[1]; /* Source address */ | |
1157 | target_ulong esize = args[2]; /* Element size (0=1,1=2,2=4,3=8) */ | |
1158 | target_ulong count = args[3]; /* Element count */ | |
1159 | target_ulong op = args[4]; /* 0 = copy, 1 = invert */ | |
1160 | uint64_t tmp; | |
1161 | unsigned int mask = (1 << esize) - 1; | |
1162 | int step = 1 << esize; | |
1163 | ||
1164 | if (count > 0x80000000) { | |
1165 | return H_PARAMETER; | |
1166 | } | |
1167 | ||
1168 | if ((dst & mask) || (src & mask) || (op > 1)) { | |
1169 | return H_PARAMETER; | |
1170 | } | |
1171 | ||
1172 | if (dst >= src && dst < (src + (count << esize))) { | |
1173 | dst = dst + ((count - 1) << esize); | |
1174 | src = src + ((count - 1) << esize); | |
1175 | step = -step; | |
1176 | } | |
1177 | ||
1178 | while (count--) { | |
1179 | switch (esize) { | |
1180 | case 0: | |
2c17449b | 1181 | tmp = ldub_phys(cs->as, src); |
c73e3771 BH |
1182 | break; |
1183 | case 1: | |
41701aa4 | 1184 | tmp = lduw_phys(cs->as, src); |
c73e3771 BH |
1185 | break; |
1186 | case 2: | |
fdfba1a2 | 1187 | tmp = ldl_phys(cs->as, src); |
c73e3771 BH |
1188 | break; |
1189 | case 3: | |
2c17449b | 1190 | tmp = ldq_phys(cs->as, src); |
c73e3771 BH |
1191 | break; |
1192 | default: | |
1193 | return H_PARAMETER; | |
1194 | } | |
1195 | if (op == 1) { | |
1196 | tmp = ~tmp; | |
1197 | } | |
1198 | switch (esize) { | |
1199 | case 0: | |
db3be60d | 1200 | stb_phys(cs->as, dst, tmp); |
c73e3771 BH |
1201 | break; |
1202 | case 1: | |
5ce5944d | 1203 | stw_phys(cs->as, dst, tmp); |
c73e3771 BH |
1204 | break; |
1205 | case 2: | |
ab1da857 | 1206 | stl_phys(cs->as, dst, tmp); |
c73e3771 BH |
1207 | break; |
1208 | case 3: | |
f606604f | 1209 | stq_phys(cs->as, dst, tmp); |
c73e3771 BH |
1210 | break; |
1211 | } | |
1212 | dst = dst + step; | |
1213 | src = src + step; | |
1214 | } | |
1215 | ||
1216 | return H_SUCCESS; | |
1217 | } | |
1218 | ||
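
h_logical_memop() handles overlapping ranges by copying from the top down whenever the destination begins inside the source range, for the same reason memmove() exists. A standalone demonstration of that rule with byte-sized elements:

```c
#include <stdint.h>
#include <stdio.h>

/* The overlap rule h_logical_memop() applies: if the destination starts inside
 * the source range, copy from the top down so bytes are not clobbered before
 * they are read.  Demonstrated on an ordinary buffer with 1-byte elements. */
static void copy_like_memop(uint8_t *base, uint64_t dst, uint64_t src,
                            uint64_t count)
{
    int step = 1;

    if (dst >= src && dst < src + count) {
        dst += count - 1;
        src += count - 1;
        step = -1;
    }
    while (count--) {
        base[dst] = base[src];
        dst += step;
        src += step;
    }
}

int main(void)
{
    uint8_t buf[8] = "abcdef";
    copy_like_memop(buf, 2, 0, 4);       /* overlapping copy of "abcd" to offset 2 */
    printf("%s\n", (char *)buf);         /* prints "ababcd" */
    return 0;
}
```
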
28e02042 | 1219 | static target_ulong h_logical_icbi(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
827200a2 DG |
1220 | target_ulong opcode, target_ulong *args) |
1221 | { | |
1222 | /* Nothing to do on emulation, KVM will trap this in the kernel */ | |
1223 | return H_SUCCESS; | |
1224 | } | |
1225 | ||
28e02042 | 1226 | static target_ulong h_logical_dcbf(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
827200a2 DG |
1227 | target_ulong opcode, target_ulong *args) |
1228 | { | |
1229 | /* Nothing to do on emulation, KVM will trap this in the kernel */ | |
1230 | return H_SUCCESS; | |
1231 | } | |
1232 | ||
7d0cd464 PM |
1233 | static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu, |
1234 | target_ulong mflags, | |
1235 | target_ulong value1, | |
1236 | target_ulong value2) | |
42561bf2 AB |
1237 | { |
1238 | CPUState *cs; | |
42561bf2 | 1239 | |
c4015bbd AK |
1240 | if (value1) { |
1241 | return H_P3; | |
1242 | } | |
1243 | if (value2) { | |
1244 | return H_P4; | |
1245 | } | |
1246 | ||
1247 | switch (mflags) { | |
1248 | case H_SET_MODE_ENDIAN_BIG: | |
1249 | CPU_FOREACH(cs) { | |
1250 | set_spr(cs, SPR_LPCR, 0, LPCR_ILE); | |
42561bf2 | 1251 | } |
eefaccc0 | 1252 | spapr_pci_switch_vga(true); |
c4015bbd AK |
1253 | return H_SUCCESS; |
1254 | ||
1255 | case H_SET_MODE_ENDIAN_LITTLE: | |
1256 | CPU_FOREACH(cs) { | |
1257 | set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE); | |
42561bf2 | 1258 | } |
eefaccc0 | 1259 | spapr_pci_switch_vga(false); |
c4015bbd AK |
1260 | return H_SUCCESS; |
1261 | } | |
42561bf2 | 1262 | |
c4015bbd AK |
1263 | return H_UNSUPPORTED_FLAG; |
1264 | } | |
42561bf2 | 1265 | |
7d0cd464 PM |
1266 | static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu, |
1267 | target_ulong mflags, | |
1268 | target_ulong value1, | |
1269 | target_ulong value2) | |
d5ac4f54 AK |
1270 | { |
1271 | CPUState *cs; | |
1272 | PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); | |
d5ac4f54 AK |
1273 | |
1274 | if (!(pcc->insns_flags2 & PPC2_ISA207S)) { | |
1275 | return H_P2; | |
1276 | } | |
1277 | if (value1) { | |
1278 | return H_P3; | |
1279 | } | |
1280 | if (value2) { | |
1281 | return H_P4; | |
1282 | } | |
1283 | ||
5c94b2a5 | 1284 | if (mflags == AIL_RESERVED) { |
d5ac4f54 AK |
1285 | return H_UNSUPPORTED_FLAG; |
1286 | } | |
1287 | ||
1288 | CPU_FOREACH(cs) { | |
d5ac4f54 | 1289 | set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL); |
d5ac4f54 AK |
1290 | } |
1291 | ||
1292 | return H_SUCCESS; | |
1293 | } | |
1294 | ||
28e02042 | 1295 | static target_ulong h_set_mode(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
c4015bbd AK |
1296 | target_ulong opcode, target_ulong *args) |
1297 | { | |
1298 | target_ulong resource = args[1]; | |
1299 | target_ulong ret = H_P2; | |
1300 | ||
1301 | switch (resource) { | |
1302 | case H_SET_MODE_RESOURCE_LE: | |
7d0cd464 | 1303 | ret = h_set_mode_resource_le(cpu, args[0], args[2], args[3]); |
c4015bbd | 1304 | break; |
d5ac4f54 | 1305 | case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE: |
7d0cd464 PM |
1306 | ret = h_set_mode_resource_addr_trans_mode(cpu, args[0], |
1307 | args[2], args[3]); | |
d5ac4f54 | 1308 | break; |
42561bf2 AB |
1309 | } |
1310 | ||
42561bf2 AB |
1311 | return ret; |
1312 | } | |
1313 | ||
d77a98b0 SJS |
1314 | static target_ulong h_clean_slb(PowerPCCPU *cpu, sPAPRMachineState *spapr, |
1315 | target_ulong opcode, target_ulong *args) | |
1316 | { | |
1317 | qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n", | |
1318 | opcode, " (H_CLEAN_SLB)"); | |
1319 | return H_FUNCTION; | |
1320 | } | |
1321 | ||
1322 | static target_ulong h_invalidate_pid(PowerPCCPU *cpu, sPAPRMachineState *spapr, | |
1323 | target_ulong opcode, target_ulong *args) | |
1324 | { | |
1325 | qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x"TARGET_FMT_lx"%s\n", | |
1326 | opcode, " (H_INVALIDATE_PID)"); | |
1327 | return H_FUNCTION; | |
1328 | } | |
1329 | ||
b4db5413 SJS |
1330 | static void spapr_check_setup_free_hpt(sPAPRMachineState *spapr, |
1331 | uint64_t patbe_old, uint64_t patbe_new) | |
1332 | { | |
1333 | /* | |
1334 | * We have 4 Options: | |
1335 | * HASH->HASH || RADIX->RADIX || NOTHING->RADIX : Do Nothing | |
1336 | * HASH->RADIX : Free HPT | |
1337 | * RADIX->HASH : Allocate HPT | |
1338 | * NOTHING->HASH : Allocate HPT | |
1339 | * Note: NOTHING implies the case where we said the guest could choose | |
1340 | * later and so assumed radix and now it's called H_REG_PROC_TBL | |
1341 | */ | |
1342 | ||
1343 | if ((patbe_old & PATBE1_GR) == (patbe_new & PATBE1_GR)) { | |
1344 | /* We assume RADIX, so this catches all the "Do Nothing" cases */ | |
1345 | } else if (!(patbe_old & PATBE1_GR)) { | |
1346 | /* HASH->RADIX : Free HPT */ | |
06ec79e8 | 1347 | spapr_free_hpt(spapr); |
b4db5413 SJS |
1348 | } else if (!(patbe_new & PATBE1_GR)) { |
1349 | /* RADIX->HASH || NOTHING->HASH : Allocate HPT */ | |
1350 | spapr_setup_hpt_and_vrma(spapr); | |
1351 | } | |
1352 | return; | |
1353 | } | |
1354 | ||
1355 | #define FLAGS_MASK 0x01FULL | |
1356 | #define FLAG_MODIFY 0x10 | |
1357 | #define FLAG_REGISTER 0x08 | |
1358 | #define FLAG_RADIX 0x04 | |
1359 | #define FLAG_HASH_PROC_TBL 0x02 | |
1360 | #define FLAG_GTSE 0x01 | |
1361 | ||
d77a98b0 SJS |
1362 | static target_ulong h_register_process_table(PowerPCCPU *cpu, |
1363 | sPAPRMachineState *spapr, | |
1364 | target_ulong opcode, | |
1365 | target_ulong *args) | |
1366 | { | |
6de83307 | 1367 | CPUState *cs; |
b4db5413 SJS |
1368 | target_ulong flags = args[0]; |
1369 | target_ulong proc_tbl = args[1]; | |
1370 | target_ulong page_size = args[2]; | |
1371 | target_ulong table_size = args[3]; | |
1372 | uint64_t cproc; | |
1373 | ||
1374 | if (flags & ~FLAGS_MASK) { /* Check no reserved bits are set */ | |
1375 | return H_PARAMETER; | |
1376 | } | |
1377 | if (flags & FLAG_MODIFY) { | |
1378 | if (flags & FLAG_REGISTER) { | |
1379 | if (flags & FLAG_RADIX) { /* Register new RADIX process table */ | |
1380 | if (proc_tbl & 0xfff || proc_tbl >> 60) { | |
1381 | return H_P2; | |
1382 | } else if (page_size) { | |
1383 | return H_P3; | |
1384 | } else if (table_size > 24) { | |
1385 | return H_P4; | |
1386 | } | |
1387 | cproc = PATBE1_GR | proc_tbl | table_size; | |
1388 | } else { /* Register new HPT process table */ | |
1389 | if (flags & FLAG_HASH_PROC_TBL) { /* Hash with Segment Tables */ | |
1390 | /* TODO - Not Supported */ | |
1391 | /* Technically caused by flag bits => H_PARAMETER */ | |
1392 | return H_PARAMETER; | |
1393 | } else { /* Hash with SLB */ | |
1394 | if (proc_tbl >> 38) { | |
1395 | return H_P2; | |
1396 | } else if (page_size & ~0x7) { | |
1397 | return H_P3; | |
1398 | } else if (table_size > 24) { | |
1399 | return H_P4; | |
1400 | } | |
1401 | } | |
1402 | cproc = (proc_tbl << 25) | page_size << 5 | table_size; | |
1403 | } | |
1404 | ||
1405 | } else { /* Deregister current process table */ | |
1406 | /* Set to benign value: (current GR) | 0. This allows | |
1407 | * deregistration in KVM to succeed even if the radix bit in flags | |
1408 | * doesn't match the radix bit in the old PATB. */ | |
1409 | cproc = spapr->patb_entry & PATBE1_GR; | |
1410 | } | |
1411 | } else { /* Maintain current registration */ | |
1412 | if (!(flags & FLAG_RADIX) != !(spapr->patb_entry & PATBE1_GR)) { | |
1413 | /* Technically caused by flag bits => H_PARAMETER */ | |
1414 | return H_PARAMETER; /* Existing Process Table Mismatch */ | |
1415 | } | |
1416 | cproc = spapr->patb_entry; | |
1417 | } | |
1418 | ||
1419 | /* Check if we need to setup OR free the hpt */ | |
1420 | spapr_check_setup_free_hpt(spapr, spapr->patb_entry, cproc); | |
1421 | ||
1422 | spapr->patb_entry = cproc; /* Save new process table */ | |
6de83307 SJS |
1423 | |
1424 | /* Update the UPRT and GTSE bits in the LPCR for all cpus */ | |
1425 | CPU_FOREACH(cs) { | |
60694bc6 | 1426 | set_spr(cs, SPR_LPCR, |
6de83307 | 1427 | ((flags & (FLAG_RADIX | FLAG_HASH_PROC_TBL)) ? LPCR_UPRT : 0) | |
60694bc6 SJS |
1428 | ((flags & FLAG_GTSE) ? LPCR_GTSE : 0), |
1429 | LPCR_UPRT | LPCR_GTSE); | |
b4db5413 SJS |
1430 | } |
1431 | ||
1432 | if (kvm_enabled()) { | |
1433 | return kvmppc_configure_v3_mmu(cpu, flags & FLAG_RADIX, | |
1434 | flags & FLAG_GTSE, cproc); | |
1435 | } | |
1436 | return H_SUCCESS; | |
d77a98b0 SJS |
1437 | } |
1438 | ||
1c7ad77e NP |
1439 | #define H_SIGNAL_SYS_RESET_ALL -1 |
1440 | #define H_SIGNAL_SYS_RESET_ALLBUTSELF -2 | |
1441 | ||
1442 | static target_ulong h_signal_sys_reset(PowerPCCPU *cpu, | |
1443 | sPAPRMachineState *spapr, | |
1444 | target_ulong opcode, target_ulong *args) | |
1445 | { | |
1446 | target_long target = args[0]; | |
1447 | CPUState *cs; | |
1448 | ||
1449 | if (target < 0) { | |
1450 | /* Broadcast */ | |
1451 | if (target < H_SIGNAL_SYS_RESET_ALLBUTSELF) { | |
1452 | return H_PARAMETER; | |
1453 | } | |
1454 | ||
1455 | CPU_FOREACH(cs) { | |
1456 | PowerPCCPU *c = POWERPC_CPU(cs); | |
1457 | ||
1458 | if (target == H_SIGNAL_SYS_RESET_ALLBUTSELF) { | |
1459 | if (c == cpu) { | |
1460 | continue; | |
1461 | } | |
1462 | } | |
1463 | run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); | |
1464 | } | |
1465 | return H_SUCCESS; | |
1466 | ||
1467 | } else { | |
1468 | /* Unicast */ | |
2e886fb3 | 1469 | cs = CPU(spapr_find_cpu(target)); |
f57467e3 SB |
1470 | if (cs) { |
1471 | run_on_cpu(cs, spapr_do_system_reset_on_cpu, RUN_ON_CPU_NULL); | |
1472 | return H_SUCCESS; | |
1c7ad77e NP |
1473 | } |
1474 | return H_PARAMETER; | |
1475 | } | |
1476 | } | |
1477 | ||
7843c0d6 | 1478 | static uint32_t cas_check_pvr(sPAPRMachineState *spapr, PowerPCCPU *cpu, |
cc7b35b1 GK |
1479 | target_ulong *addr, bool *raw_mode_supported, |
1480 | Error **errp) | |
2a6593cb | 1481 | { |
152ef803 | 1482 | bool explicit_match = false; /* Matched the CPU's real PVR */ |
7843c0d6 | 1483 | uint32_t max_compat = spapr->max_compat_pvr; |
152ef803 DG |
1484 | uint32_t best_compat = 0; |
1485 | int i; | |
3794d548 | 1486 | |
152ef803 DG |
1487 | /* |
1488 | * We scan the supplied table of PVRs looking for two things | |
1489 | * 1. Is our real CPU PVR in the list? | |
1490 | * 2. What's the "best" listed logical PVR | |
1491 | */ | |
1492 | for (i = 0; i < 512; ++i) { | |
3794d548 AK |
1493 | uint32_t pvr, pvr_mask; |
1494 | ||
80c33d34 DG |
1495 | pvr_mask = ldl_be_phys(&address_space_memory, *addr); |
1496 | pvr = ldl_be_phys(&address_space_memory, *addr + 4); | |
1497 | *addr += 8; | |
152ef803 | 1498 | |
3794d548 | 1499 | if (~pvr_mask & pvr) { |
152ef803 | 1500 | break; /* Terminator record */ |
3794d548 | 1501 | } |
152ef803 DG |
1502 | |
1503 | if ((cpu->env.spr[SPR_PVR] & pvr_mask) == (pvr & pvr_mask)) { | |
1504 | explicit_match = true; | |
1505 | } else { | |
1506 | if (ppc_check_compat(cpu, pvr, best_compat, max_compat)) { | |
1507 | best_compat = pvr; | |
1508 | } | |
1509 | } | |
1510 | } | |
1511 | ||
1512 | if ((best_compat == 0) && (!explicit_match || max_compat)) { | |
1513 | /* We couldn't find a suitable compatibility mode, and either | |
1514 | * the guest doesn't support "raw" mode for this CPU, or raw | |
1515 | * mode is disabled because a maximum compat mode is set */ | |
80c33d34 DG |
1516 | error_setg(errp, "Couldn't negotiate a suitable PVR during CAS"); |
1517 | return 0; | |
3794d548 AK |
1518 | } |
1519 | ||
cc7b35b1 GK |
1520 | *raw_mode_supported = explicit_match; |
1521 | ||
3794d548 | 1522 | /* Parsing finished */ |
152ef803 | 1523 | trace_spapr_cas_pvr(cpu->compat_pvr, explicit_match, best_compat); |
3794d548 | 1524 | |
80c33d34 DG | 1525 | return best_compat; |
80c33d34 DG | 1526 | } |
3794d548 | 1527 | |
80c33d34 DG | 1528 | static target_ulong h_client_architecture_support(PowerPCCPU *cpu, |
1529 | sPAPRMachineState *spapr, | |
1530 | target_ulong opcode, | |
1531 | target_ulong *args) | |
1532 | { | |
1533 | /* Working address in data buffer */ | |
1534 | target_ulong addr = ppc64_phys_to_real(args[0]); | |
1535 | target_ulong ov_table; | |
1536 | uint32_t cas_pvr; | |
1537 | sPAPROptionVector *ov1_guest, *ov5_guest, *ov5_cas_old, *ov5_updates; | |
1538 | bool guest_radix; | |
1539 | Error *local_err = NULL; | |
cc7b35b1 | 1540 | bool raw_mode_supported = false; |
80c33d34 | 1541 | |
cc7b35b1 | 1542 | cas_pvr = cas_check_pvr(spapr, cpu, &addr, &raw_mode_supported, &local_err); |
80c33d34 DG |
1543 | if (local_err) { |
1544 | error_report_err(local_err); | |
1545 | return H_HARDWARE; | |
1546 | } | |
1547 | ||
1548 | /* Update CPUs */ | |
1549 | if (cpu->compat_pvr != cas_pvr) { | |
1550 | ppc_set_compat_all(cas_pvr, &local_err); | |
f6f242c7 | 1551 | if (local_err) { |
cc7b35b1 GK |
1552 | /* We fail to set compat mode (likely because running with KVM PR), |
1553 | * but maybe we can fallback to raw mode if the guest supports it. | |
1554 | */ | |
1555 | if (!raw_mode_supported) { | |
1556 | error_report_err(local_err); | |
1557 | return H_HARDWARE; | |
1558 | } | |
1559 | local_err = NULL; | |
3794d548 AK |
1560 | } |
1561 | } | |
1562 | ||
03d196b7 | 1563 | /* For the future use: here @ov_table points to the first option vector */ |
80c33d34 | 1564 | ov_table = addr; |
03d196b7 | 1565 | |
e957f6a9 | 1566 | ov1_guest = spapr_ovec_parse_vector(ov_table, 1); |
facdb8b6 | 1567 | ov5_guest = spapr_ovec_parse_vector(ov_table, 5); |
9fb4541f SB |
1568 | if (spapr_ovec_test(ov5_guest, OV5_MMU_BOTH)) { |
1569 | error_report("guest requested hash and radix MMU, which is invalid."); | |
1570 | exit(EXIT_FAILURE); | |
1571 | } | |
1572 | /* The radix/hash bit in byte 24 requires special handling: */ | |
1573 | guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300); | |
1574 | spapr_ovec_clear(ov5_guest, OV5_MMU_RADIX_300); | |
2a6593cb | 1575 | |
2772cf6b DG |
1576 | /* |
1577 | * HPT resizing is a bit of a special case, because when enabled | |
1578 | * we assume an HPT guest will support it until it says it | |
1579 | * doesn't, instead of assuming it won't support it until it says | |
1580 | * it does. Strictly speaking that approach could break for | |
1581 | * guests which don't make a CAS call, but those are so old we | |
1582 | * don't care about them. Without that assumption we'd have to | |
1583 | * make at least a temporary allocation of an HPT sized for max | |
1584 | * memory, which could be impossibly difficult under KVM HV if | |
1585 | * maxram is large. | |
1586 | */ | |
1587 | if (!guest_radix && !spapr_ovec_test(ov5_guest, OV5_HPT_RESIZE)) { | |
1588 | int maxshift = spapr_hpt_shift_for_ramsize(MACHINE(spapr)->maxram_size); | |
1589 | ||
1590 | if (spapr->resize_hpt == SPAPR_RESIZE_HPT_REQUIRED) { | |
1591 | error_report( | |
1592 | "h_client_architecture_support: Guest doesn't support HPT resizing, but resize-hpt=required"); | |
1593 | exit(1); | |
1594 | } | |
1595 | ||
1596 | if (spapr->htab_shift < maxshift) { | |
1597 | /* Guest doesn't know about HPT resizing, so we | |
1598 | * pre-emptively resize for the maximum permitted RAM. At | |
1599 | * the point this is called, nothing should have been | |
1600 | * entered into the existing HPT */ | |
1601 | spapr_reallocate_hpt(spapr, maxshift, &error_fatal); | |
1ec26c75 | 1602 | push_sregs_to_kvm_pr(spapr); |
2772cf6b DG |
1603 | } |
1604 | } | |
1605 | ||
facdb8b6 MR |
1606 | /* NOTE: there are actually a number of ov5 bits where input from the |
1607 | * guest is always zero, and the platform/QEMU enables them independently | |
1608 | * of guest input. To model these properly we'd want some sort of mask, | |
1609 | * but since they only currently apply to memory migration as defined | |
1610 | * by LoPAPR 1.1, 14.5.4.8, which QEMU doesn't implement, we don't need | |
6787d27b | 1611 | * to worry about this for now. |
facdb8b6 | 1612 | */ |
6787d27b | 1613 | ov5_cas_old = spapr_ovec_clone(spapr->ov5_cas); |
30bf9ed1 CLG |
1614 | |
1615 | /* Also clear the radix/hash bit from the current ov5_cas bits, to
1616 | * keep them in sync with the new ov5 bits. Otherwise the radix bit will be
1617 | * seen as being removed, and this will generate a reset loop.
1618 | */ | |
1619 | spapr_ovec_clear(ov5_cas_old, OV5_MMU_RADIX_300); | |
1620 | ||
6787d27b | 1621 | /* full range of negotiated ov5 capabilities */ |
facdb8b6 MR |
1622 | spapr_ovec_intersect(spapr->ov5_cas, spapr->ov5, ov5_guest); |
1623 | spapr_ovec_cleanup(ov5_guest); | |
6787d27b MR |
1624 | /* Capabilities that have been added since the last CAS-generated guest reset.
1625 | * If capabilities have since been removed, generate another reset.
1626 | */ | |
1627 | ov5_updates = spapr_ovec_new(); | |
1628 | spapr->cas_reboot = spapr_ovec_diff(ov5_updates, | |
1629 | ov5_cas_old, spapr->ov5_cas); | |
9fb4541f SB |
1630 | /* Now that processing is finished, set the radix/hash bit for the |
1631 | * guest if it requested a valid mode; otherwise terminate the boot. */ | |
1632 | if (guest_radix) { | |
1633 | if (kvm_enabled() && !kvmppc_has_cap_mmu_radix()) { | |
1634 | error_report("Guest requested unavailable MMU mode (radix)."); | |
1635 | exit(EXIT_FAILURE); | |
1636 | } | |
1637 | spapr_ovec_set(spapr->ov5_cas, OV5_MMU_RADIX_300); | |
1638 | } else { | |
1639 | if (kvm_enabled() && kvmppc_has_cap_mmu_radix() | |
1640 | && !kvmppc_has_cap_mmu_hash_v3()) { | |
1641 | error_report("Guest requested unavailable MMU mode (hash)."); | |
1642 | exit(EXIT_FAILURE); | |
1643 | } | |
1644 | } | |
e957f6a9 SB |
1645 | spapr->cas_legacy_guest_workaround = !spapr_ovec_test(ov1_guest, |
1646 | OV1_PPC_3_00); | |
6787d27b | 1647 | if (!spapr->cas_reboot) { |
b472b1a7 | 1648 | /* If spapr_machine_reset() did not set up an HPT but one is necessary
e05fba50 SB |
1649 | * (because the guest isn't going to use radix) then set it up here. */ |
1650 | if ((spapr->patb_entry & PATBE1_GR) && !guest_radix) { | |
1651 | /* legacy hash or new hash: */ | |
1652 | spapr_setup_hpt_and_vrma(spapr); | |
1653 | } | |
6787d27b | 1654 | spapr->cas_reboot = |
5b120785 | 1655 | (spapr_h_cas_compose_response(spapr, args[1], args[2], |
6787d27b MR |
1656 | ov5_updates) != 0); |
1657 | } | |
1658 | spapr_ovec_cleanup(ov5_updates); | |
03d196b7 | 1659 | |
6787d27b | 1660 | if (spapr->cas_reboot) { |
cf83f140 | 1661 | qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); |
2a6593cb AK |
1662 | } |
1663 | ||
1664 | return H_SUCCESS; | |
1665 | } | |
1666 | ||
c59704b2 SJS |
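/* H_GET_CPU_CHARACTERISTICS: report speculative-execution mitigation
 * characteristics to the guest, derived from the machine's SPAPR_CAP_CFPC,
 * SPAPR_CAP_SBBC and SPAPR_CAP_IBS capability settings.  The characteristics
 * word is returned in args[0] and the recommended behaviour word in args[1].
 */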
1667 | static target_ulong h_get_cpu_characteristics(PowerPCCPU *cpu, |
1668 | sPAPRMachineState *spapr, | |
1669 | target_ulong opcode, | |
1670 | target_ulong *args) | |
1671 | { | |
1672 | uint64_t characteristics = H_CPU_CHAR_HON_BRANCH_HINTS & | |
1673 | ~H_CPU_CHAR_THR_RECONF_TRIG; | |
1674 | uint64_t behaviour = H_CPU_BEHAV_FAVOUR_SECURITY; | |
1675 | uint8_t safe_cache = spapr_get_cap(spapr, SPAPR_CAP_CFPC); | |
1676 | uint8_t safe_bounds_check = spapr_get_cap(spapr, SPAPR_CAP_SBBC); | |
1677 | uint8_t safe_indirect_branch = spapr_get_cap(spapr, SPAPR_CAP_IBS); | |
1678 | ||
1679 | switch (safe_cache) { | |
1680 | case SPAPR_CAP_WORKAROUND: | |
1681 | characteristics |= H_CPU_CHAR_L1D_FLUSH_ORI30; | |
1682 | characteristics |= H_CPU_CHAR_L1D_FLUSH_TRIG2; | |
1683 | characteristics |= H_CPU_CHAR_L1D_THREAD_PRIV; | |
1684 | behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR; | |
1685 | break; | |
1686 | case SPAPR_CAP_FIXED: | |
1687 | break; | |
1688 | default: /* broken */ | |
1689 | assert(safe_cache == SPAPR_CAP_BROKEN); | |
1690 | behaviour |= H_CPU_BEHAV_L1D_FLUSH_PR; | |
1691 | break; | |
1692 | } | |
1693 | ||
1694 | switch (safe_bounds_check) { | |
1695 | case SPAPR_CAP_WORKAROUND: | |
1696 | characteristics |= H_CPU_CHAR_SPEC_BAR_ORI31; | |
1697 | behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR; | |
1698 | break; | |
1699 | case SPAPR_CAP_FIXED: | |
1700 | break; | |
1701 | default: /* broken */ | |
1702 | assert(safe_bounds_check == SPAPR_CAP_BROKEN); | |
1703 | behaviour |= H_CPU_BEHAV_BNDS_CHK_SPEC_BAR; | |
1704 | break; | |
1705 | } | |
1706 | ||
1707 | switch (safe_indirect_branch) { | |
1708 | case SPAPR_CAP_FIXED: | |
1709 | characteristics |= H_CPU_CHAR_BCCTRL_SERIALISED; | |
fa86f592 | 1710 | break; |
c59704b2 SJS |
1711 | default: /* broken */ |
1712 | assert(safe_indirect_branch == SPAPR_CAP_BROKEN); | |
1713 | break; | |
1714 | } | |
1715 | ||
1716 | args[0] = characteristics; | |
1717 | args[1] = behaviour; | |
1718 | ||
1719 | return H_SUCCESS; | |
1720 | } | |
1721 | ||
7d7ba3fe DG |
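/* sPAPR hcall opcodes are multiples of 4, so the PAPR table is indexed by
 * opcode / 4; qemu/KVM-private hcalls live in a separate table indexed by
 * opcode - KVMPPC_HCALL_BASE.
 */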
1722 | static spapr_hcall_fn papr_hypercall_table[(MAX_HCALL_OPCODE / 4) + 1]; |
1723 | static spapr_hcall_fn kvmppc_hypercall_table[KVMPPC_HCALL_MAX - KVMPPC_HCALL_BASE + 1]; | |
9fdf0c29 DG |
1724 | |
1725 | void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn) | |
1726 | { | |
39ac8455 DG |
1727 | spapr_hcall_fn *slot; |
1728 | ||
1729 | if (opcode <= MAX_HCALL_OPCODE) { | |
1730 | assert((opcode & 0x3) == 0); | |
9fdf0c29 | 1731 | |
39ac8455 DG |
1732 | slot = &papr_hypercall_table[opcode / 4]; |
1733 | } else { | |
1734 | assert((opcode >= KVMPPC_HCALL_BASE) && (opcode <= KVMPPC_HCALL_MAX)); | |
9fdf0c29 | 1735 | |
39ac8455 DG |
1736 | slot = &kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; |
1737 | } | |
9fdf0c29 | 1738 | |
c89d5299 | 1739 | assert(!(*slot)); |
39ac8455 | 1740 | *slot = fn; |
9fdf0c29 DG |
1741 | } |
1742 | ||
aa100fa4 | 1743 | target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode, |
9fdf0c29 DG |
1744 | target_ulong *args) |
1745 | { | |
28e02042 DG |
1746 | sPAPRMachineState *spapr = SPAPR_MACHINE(qdev_get_machine()); |
1747 | ||
9fdf0c29 DG |
1748 | if ((opcode <= MAX_HCALL_OPCODE) |
1749 | && ((opcode & 0x3) == 0)) { | |
39ac8455 DG |
1750 | spapr_hcall_fn fn = papr_hypercall_table[opcode / 4]; |
1751 | ||
1752 | if (fn) { | |
b13ce26d | 1753 | return fn(cpu, spapr, opcode, args); |
39ac8455 DG |
1754 | } |
1755 | } else if ((opcode >= KVMPPC_HCALL_BASE) && | |
1756 | (opcode <= KVMPPC_HCALL_MAX)) { | |
1757 | spapr_hcall_fn fn = kvmppc_hypercall_table[opcode - KVMPPC_HCALL_BASE]; | |
9fdf0c29 DG |
1758 | |
1759 | if (fn) { | |
b13ce26d | 1760 | return fn(cpu, spapr, opcode, args); |
9fdf0c29 DG |
1761 | } |
1762 | } | |
1763 | ||
aaf87c66 TH |
1764 | qemu_log_mask(LOG_UNIMP, "Unimplemented SPAPR hcall 0x" TARGET_FMT_lx "\n", |
1765 | opcode); | |
9fdf0c29 DG |
1766 | return H_FUNCTION; |
1767 | } | |
f43e3525 | 1768 | |
83f7d43a | 1769 | static void hypercall_register_types(void) |
f43e3525 DG |
1770 | { |
1771 | /* hcall-pft */ | |
1772 | spapr_register_hypercall(H_ENTER, h_enter); | |
1773 | spapr_register_hypercall(H_REMOVE, h_remove); | |
1774 | spapr_register_hypercall(H_PROTECT, h_protect); | |
6bbd5dde | 1775 | spapr_register_hypercall(H_READ, h_read); |
39ac8455 | 1776 | |
a3d0abae DG |
1777 | /* hcall-bulk */ |
1778 | spapr_register_hypercall(H_BULK_REMOVE, h_bulk_remove); | |
1779 | ||
30f4b05b DG |
1780 | /* hcall-hpt-resize */ |
1781 | spapr_register_hypercall(H_RESIZE_HPT_PREPARE, h_resize_hpt_prepare); | |
1782 | spapr_register_hypercall(H_RESIZE_HPT_COMMIT, h_resize_hpt_commit); | |
1783 | ||
ed120055 DG |
1784 | /* hcall-splpar */ |
1785 | spapr_register_hypercall(H_REGISTER_VPA, h_register_vpa); | |
1786 | spapr_register_hypercall(H_CEDE, h_cede); | |
1c7ad77e | 1787 | spapr_register_hypercall(H_SIGNAL_SYS_RESET, h_signal_sys_reset); |
ed120055 | 1788 | |
423576f7 TH |
1789 | /* processor register resource access h-calls */ |
1790 | spapr_register_hypercall(H_SET_SPRG0, h_set_sprg0); | |
af08a58f | 1791 | spapr_register_hypercall(H_SET_DABR, h_set_dabr); |
e49ff266 | 1792 | spapr_register_hypercall(H_SET_XDABR, h_set_xdabr); |
3240dd9a | 1793 | spapr_register_hypercall(H_PAGE_INIT, h_page_init); |
423576f7 TH |
1794 | spapr_register_hypercall(H_SET_MODE, h_set_mode); |
1795 | ||
d77a98b0 SJS |
1796 | /* In Memory Table MMU h-calls */ |
1797 | spapr_register_hypercall(H_CLEAN_SLB, h_clean_slb); | |
1798 | spapr_register_hypercall(H_INVALIDATE_PID, h_invalidate_pid); | |
1799 | spapr_register_hypercall(H_REGISTER_PROC_TBL, h_register_process_table); | |
1800 | ||
c59704b2 SJS |
1801 | /* hcall-get-cpu-characteristics */ |
1802 | spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS, | |
1803 | h_get_cpu_characteristics); | |
1804 | ||
827200a2 DG |
1805 | /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate |
1806 | * here between the "CI" and the "CACHE" variants, they will use whatever | |
1807 | * mapping attributes qemu is using. When using KVM, the kernel will | |
1808 | * enforce the attributes more strongly | |
1809 | */ | |
1810 | spapr_register_hypercall(H_LOGICAL_CI_LOAD, h_logical_load); | |
1811 | spapr_register_hypercall(H_LOGICAL_CI_STORE, h_logical_store); | |
1812 | spapr_register_hypercall(H_LOGICAL_CACHE_LOAD, h_logical_load); | |
1813 | spapr_register_hypercall(H_LOGICAL_CACHE_STORE, h_logical_store); | |
1814 | spapr_register_hypercall(H_LOGICAL_ICBI, h_logical_icbi); | |
1815 | spapr_register_hypercall(H_LOGICAL_DCBF, h_logical_dcbf); | |
c73e3771 | 1816 | spapr_register_hypercall(KVMPPC_H_LOGICAL_MEMOP, h_logical_memop); |
827200a2 | 1817 | |
39ac8455 DG |
1818 | /* qemu/KVM-PPC specific hcalls */ |
1819 | spapr_register_hypercall(KVMPPC_H_RTAS, h_rtas); | |
42561bf2 | 1820 | |
2a6593cb AK |
1821 | /* ibm,client-architecture-support support */ |
1822 | spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support); | |
f43e3525 | 1823 | } |
83f7d43a AF |
1824 | |
1825 | type_init(hypercall_register_types) |
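/* Illustrative sketch (not part of the original file, hence guarded out):
 * how an additional qemu-private hcall could be wired up through
 * spapr_register_hypercall().  The opcode KVMPPC_H_EXAMPLE and the handler
 * below are hypothetical; only the spapr_hcall_fn signature and the H_*
 * return codes are taken from the code above.
 */
#if 0
static target_ulong h_example(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    /* args[] carries the guest-supplied hcall arguments */
    if (args[0] != 0) {
        return H_PARAMETER;     /* reject anything but a zero flags word */
    }
    return H_SUCCESS;
}

static void example_register_types(void)
{
    /* KVMPPC_H_EXAMPLE would have to be a new opcode in the private
     * KVMPPC_HCALL_BASE..KVMPPC_HCALL_MAX range. */
    spapr_register_hypercall(KVMPPC_H_EXAMPLE, h_example);
}

type_init(example_register_types)
#endif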