* Calculate the raw per-cpu offset without a translation from the
* kernel's mapping to the linear mapping, and store it in tpidr_el2
* so that we can use adr_l to access per-cpu variables in EL2.
+ * Also drop the KASAN tag, which would otherwise corrupt the computed offset.
*/
- params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
+ params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
(unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
params->mair_el2 = read_sysreg(mair_el1);
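With tpidr_el2 seeded this way, per-cpu access at EL2 reduces to "per-cpu symbol address + tpidr_el2". A minimal sketch of the EL2 side, with an assumed helper name:

static inline unsigned long __hyp_my_cpu_offset(void)
{
	/*
	 * tpidr_el2 holds the raw offset from __per_cpu_start computed
	 * above, so no kernel-to-hyp address translation is needed here.
	 */
	return read_sysreg(tpidr_el2);
}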
goto out_err;
}
- err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_ro_after_init_start),
- kvm_ksym_ref(__hyp_data_ro_after_init_end),
- PAGE_HYP_RO);
+ err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
+ kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
if (err) {
- kvm_err("Cannot map .hyp.data..ro_after_init section\n");
+ kvm_err("Cannot map .hyp.rodata section\n");
goto out_err;
}
# Makefile for Kernel-based Virtual Machine module, HYP/nVHE part
#
-asflags-y := -D__KVM_NVHE_HYPERVISOR__
-ccflags-y := -D__KVM_NVHE_HYPERVISOR__
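+# __DISABLE_EXPORTS turns EXPORT_SYMBOL() into a no-op, so that sources
+# shared with the kernel proper do not emit export metadata into the
+# hyp object.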
+asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
+ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
+ hostprogs := gen-hyprel
+ HOST_EXTRACFLAGS += -I$(objtree)/include
+
obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o
obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
obj-y := kvm_nvhe.o
- extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds
+ extra-y := $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o
# 1) Compile all source files to `.nvhe.o` object files. The file extension
# avoids file name clashes for files shared with VHE.
$(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
$(call if_changed,ld)
- # 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
+ # 4) Generate list of hyp code/data positions that need to be relocated at
+ # runtime. Because the hypervisor is part of the kernel binary, relocations
+ # produce a kernel VA. We enumerate relocations targeting hyp at build time
+ # and convert the kernel VAs at those positions to hyp VAs at runtime.
+ $(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+ $(call if_changed,hyprel)
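As a rough illustration of what this step emits (the section name and entry format are assumptions for the sketch, not the tool's exact output), a generator of this kind can print one self-relative 32-bit entry per position that needs fixing up:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical host-tool emitter for one relocation entry. */
static void emit_reloc_entry(const char *section, uint64_t offset)
{
	printf(".pushsection .hyp.reloc, \"a\"\n");
	printf(".align 2\n");
	/* "." is the entry's own address, so the stored value is self-relative. */
	printf(".long %s + 0x%llx - .\n", section, (unsigned long long)offset);
	printf(".popsection\n");
}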
+
+ # 5) Compile hyp-reloc.S and link it into the existing partially linked object.
+ # The object file now contains a section with pointers to hyp positions that
+ # will contain kernel VAs at runtime. These pointers have relocations on them
+ # so that they get updated as the hyp object is linked into `vmlinux`.
+ LDFLAGS_kvm_nvhe.rel.o := -r
+ $(obj)/kvm_nvhe.rel.o: $(obj)/kvm_nvhe.tmp.o $(obj)/hyp-reloc.o FORCE
+ $(call if_changed,ld)
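For context, a sketch of how such entries might be consumed at boot; the section delimiter symbols and the kern_hyp_va() conversion helper are assumptions for illustration:

extern char __hyp_reloc_begin[], __hyp_reloc_end[];

static void apply_hyp_relocations(void)
{
	int32_t *rel = (int32_t *)__hyp_reloc_begin;

	for (; rel < (int32_t *)__hyp_reloc_end; rel++) {
		/* Each 32-bit entry points, relative to itself, at a 64-bit slot. */
		uint64_t *ptr = (uint64_t *)((char *)rel + *rel);

		/* The linker left a kernel VA in the slot; rewrite it as a hyp VA. */
		*ptr = kern_hyp_va(*ptr);
	}
}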
+
+ # 6) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
# Prefixes names of ELF symbols with '__kvm_nvhe_'.
- $(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE
+ $(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.rel.o FORCE
$(call if_changed,hypcopy)
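The kernel proper then refers to the prefixed symbols through a name-mangling macro; a minimal sketch mirroring the CHOOSE_NVHE_SYM()/kvm_ksym_ref() usage earlier in this series (the declaration is illustrative):

/* Mangle a hyp symbol name into its '__kvm_nvhe_'-prefixed form. */
#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

/* e.g. the kernel-side view of the prefixed per-cpu base symbol */
extern char kvm_nvhe_sym(__per_cpu_start)[];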
+ # The HYPREL command calls `gen-hyprel` to generate an assembly file with
+ # a list of relocations targeting hyp code/data.
+ quiet_cmd_hyprel = HYPREL $@
+ cmd_hyprel = $(obj)/gen-hyprel $< > $@
+
# The HYPCOPY command uses `objcopy` to prefix all ELF symbol names
# to avoid clashes with VHE code/data.
quiet_cmd_hypcopy = HYPCOPY $@
#include <asm/virt.h>
.text
- .pushsection .hyp.idmap.text, "ax"
+ .pushsection .idmap.text, "ax"
.align 11
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
- // We only actively check bits [24:31], and everything
- // else has to be zero, which we check at build time.
-#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF)
-#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value
-#endif
-
- ror x0, x0, #24
- eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF)
- ror x0, x0, #4
- eor x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF)
- cbz x0, 1f
+ mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
+ cmp x0, x3
+ b.eq 1f
+
mov x0, #SMCCC_RET_NOT_SUPPORTED
eret
/* Set the host vector */
ldr x0, =__kvm_hyp_host_vector
- kimg_hyp_va x0, x1
msr vbar_el2, x0
ret
/* Leave idmap. */
mov x0, x29
ldr x1, =kvm_host_psci_cpu_entry
- kimg_hyp_va x1, x2
br x1
SYM_CODE_END(__kvm_hyp_init_cpu)
cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}
-static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
-{
- psci_forward(host_ctxt);
- hyp_panic(); /* unreachable */
-}
-
static unsigned int find_cpu_id(u64 mpidr)
{
unsigned int i;
if (cpu_id == INVALID_CPU_ID)
return PSCI_RET_INVALID_PARAMS;
- boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id);
- init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id);
+ boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
+ init_params = per_cpu_ptr(&kvm_init_params, cpu_id);
/* Check if the target CPU is already being booted. */
if (!try_acquire_boot_args(boot_args))
wmb();
ret = psci_call(func_id, mpidr,
- __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)),
+ __hyp_pa(&kvm_hyp_cpu_entry),
__hyp_pa(init_params));
/* If successful, the lock will be released by the target CPU. */
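These hunks, like the kimg_hyp_va removals above, retire manual address fixups: once the relocation pass rewrites hyp positions with hyp VAs, a plain address-of expression is safe. For reference, a sketch of the kind of helper being dropped (reconstructed for illustration, not quoted from the tree):

/*
 * Force a PC-relative materialisation of a symbol's address via
 * adrp/add, so that hyp code never loads an absolute (kernel VA)
 * constant from a literal pool.
 */
#define hyp_symbol_addr(s)					\
	({							\
		typeof(s) *addr;				\
		asm("adrp	%0, %1\n"			\
		    "add	%0, %0, :lo12:%1\n"		\
		    : "=r" (addr) : "S" (&s));			\
		addr;						\
	})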
struct psci_boot_args *boot_args;
struct kvm_nvhe_init_params *init_params;
- boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
- init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+ boot_args = this_cpu_ptr(&suspend_args);
+ init_params = this_cpu_ptr(&kvm_init_params);
/*
* No need to acquire a lock before writing to boot_args because a core
* point if it is a deep sleep state.
*/
return psci_call(func_id, power_state,
- __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+ __hyp_pa(&kvm_hyp_cpu_resume),
__hyp_pa(init_params));
}
struct psci_boot_args *boot_args;
struct kvm_nvhe_init_params *init_params;
- boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
- init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+ boot_args = this_cpu_ptr(&suspend_args);
+ init_params = this_cpu_ptr(&kvm_init_params);
/*
* No need to acquire a lock before writing to boot_args because a core
/* Will only return on error. */
return psci_call(func_id,
- __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+ __hyp_pa(&kvm_hyp_cpu_resume),
__hyp_pa(init_params), 0);
}
struct psci_boot_args *boot_args;
struct kvm_cpu_context *host_ctxt;
- host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt;
+ host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
if (is_cpu_on)
- boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args));
+ boot_args = this_cpu_ptr(&cpu_on_args);
else
- boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
+ boot_args = this_cpu_ptr(&suspend_args);
cpu_reg(host_ctxt, 0) = boot_args->r0;
write_sysreg_el2(boot_args->pc, SYS_ELR);
case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
return psci_forward(host_ctxt);
+ /*
+ * SYSTEM_OFF/RESET should not return, according to the spec.
+ * Allow them to return anyway, so as to stay robust to broken
+ * firmware.
+ */
case PSCI_0_2_FN_SYSTEM_OFF:
case PSCI_0_2_FN_SYSTEM_RESET:
- psci_forward_noreturn(host_ctxt);
- unreachable();
+ return psci_forward(host_ctxt);
case PSCI_0_2_FN64_CPU_SUSPEND:
return psci_cpu_suspend(func_id, host_ctxt);
case PSCI_0_2_FN64_CPU_ON: