Merge branch 'kvm-arm64/hyp-reloc' into kvmarm-master/next
author     Marc Zyngier <[email protected]>
           Fri, 12 Feb 2021 14:08:18 +0000 (14:08 +0000)
committer  Marc Zyngier <[email protected]>
           Fri, 12 Feb 2021 14:08:18 +0000 (14:08 +0000)
Signed-off-by: Marc Zyngier <[email protected]>
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/Makefile
arch/arm64/kvm/hyp/nvhe/hyp-init.S
arch/arm64/kvm/hyp/nvhe/psci-relay.c

diff --combined arch/arm64/kvm/arm.c
index fe60d25c000e4f7148fd36e762932f4e923745f4,de1af4052780dc988b0c7f4da2ebec34c44319bd..bb85da1d58803c0caed00f0efa06c0d21f58dcae
@@@ -1396,9 -1396,8 +1396,9 @@@ static void cpu_init_hyp_mode(void
         * Calculate the raw per-cpu offset without a translation from the
         * kernel's mapping to the linear mapping, and store it in tpidr_el2
         * so that we can use adr_l to access per-cpu variables in EL2.
 +       * Also drop the KASAN tag which gets in the way...
         */
 -      params->tpidr_el2 = (unsigned long)this_cpu_ptr_nvhe_sym(__per_cpu_start) -
 +      params->tpidr_el2 = (unsigned long)kasan_reset_tag(this_cpu_ptr_nvhe_sym(__per_cpu_start)) -
                            (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start));
  
        params->mair_el2 = read_sysreg(mair_el1);
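
For context, the value stored in tpidr_el2 above is plain pointer
arithmetic: this CPU's per-cpu base minus the link-time start of the hyp
per-cpu section, so that an adr_l-generated address plus tpidr_el2
resolves per-cpu variables at EL2. A minimal C sketch of that arithmetic
(illustrative names, not the kernel's helpers):

    /*
     * Sketch: compute the raw per-cpu offset used at EL2. `percpu_base`
     * is this CPU's copy of the hyp per-cpu area (KASAN tag already
     * stripped); `percpu_start` is the section's link-time start.
     */
    static unsigned long hyp_percpu_offset(void *percpu_base,
                                           void *percpu_start)
    {
        /* At EL2: &var == link-time &var + tpidr_el2 */
        return (unsigned long)percpu_base - (unsigned long)percpu_start;
    }
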
@@@ -1750,11 -1749,10 +1750,10 @@@ static int init_hyp_mode(void
                goto out_err;
        }
  
-       err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_ro_after_init_start),
-                                 kvm_ksym_ref(__hyp_data_ro_after_init_end),
-                                 PAGE_HYP_RO);
+       err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
+                                 kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
        if (err) {
-               kvm_err("Cannot map .hyp.data..ro_after_init section\n");
+               kvm_err("Cannot map .hyp.rodata section\n");
                goto out_err;
        }
  
diff --combined arch/arm64/kvm/hyp/nvhe/Makefile
index c9c121c8d5deb81a68a8cf8e36672659f8b6fc09,ed10fcf1b3458aa80bc5cdb30699911baab9320d..a6707df4f6c0fc017c796e770d9ff5f05ded1f54
@@@ -3,9 -3,12 +3,12 @@@
  # Makefile for Kernel-based Virtual Machine module, HYP/nVHE part
  #
  
 -asflags-y := -D__KVM_NVHE_HYPERVISOR__
 -ccflags-y := -D__KVM_NVHE_HYPERVISOR__
 +asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
 +ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
  
+ hostprogs := gen-hyprel
+ HOST_EXTRACFLAGS += -I$(objtree)/include
  obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
         hyp-main.o hyp-smp.o psci-relay.o
  obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
@@@ -19,7 -22,7 +22,7 @@@
  
  hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
  obj-y := kvm_nvhe.o
- extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds
+ extra-y := $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o
  
  # 1) Compile all source files to `.nvhe.o` object files. The file extension
  #    avoids file name clashes for files shared with VHE.
@@@ -42,11 -45,31 +45,31 @@@ LDFLAGS_kvm_nvhe.tmp.o := -r -
  $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
        $(call if_changed,ld)
  
- # 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
+ # 4) Generate list of hyp code/data positions that need to be relocated at
+ #    runtime. Because the hypervisor is part of the kernel binary, relocations
+ #    produce a kernel VA. We enumerate relocations targeting hyp at build time
+ #    and convert the kernel VAs at those positions to hyp VAs.
+ $(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+       $(call if_changed,hyprel)
+ # 5) Compile hyp-reloc.S and link it into the existing partially linked object.
+ #    The object file now contains a section with pointers to hyp positions that
+ #    will contain kernel VAs at runtime. These pointers have relocations on them
+ #    so that they get updated as the hyp object is linked into `vmlinux`.
+ LDFLAGS_kvm_nvhe.rel.o := -r
+ $(obj)/kvm_nvhe.rel.o: $(obj)/kvm_nvhe.tmp.o $(obj)/hyp-reloc.o FORCE
+       $(call if_changed,ld)
+ # 6) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
  #    Prefixes names of ELF symbols with '__kvm_nvhe_'.
- $(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE
+ $(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.rel.o FORCE
        $(call if_changed,hypcopy)
  
+ # The HYPREL command calls `gen-hyprel` to generate an assembly file with
+ # a list of relocations targeting hyp code/data.
+ quiet_cmd_hyprel = HYPREL  $@
+       cmd_hyprel = $(obj)/gen-hyprel $< > $@
  # The HYPCOPY command uses `objcopy` to prefix all ELF symbol names
  # to avoid clashes with VHE code/data.
  quiet_cmd_hypcopy = HYPCOPY $@
        cmd_hypcopy = $(OBJCOPY) --prefix-symbols=__kvm_nvhe_ $< $@
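
The entries emitted in step 4 are consumed once during early boot, when
the kernel walks the generated table and rewrites each recorded hyp
position from a kernel VA to a hyp VA. A sketch of that pass, assuming
each entry is a 32-bit self-relative offset to the word to patch
(`kern_va_to_hyp_va` is a stand-in for the kernel's actual conversion,
not a real kernel helper):

    /*
     * Sketch of the boot-time fixup pass over the generated relocation
     * table. Each 32-bit entry is an offset from its own address to a
     * hyp pointer that currently holds a kernel image VA.
     */
    static void apply_hyp_relocs(int32_t *begin, int32_t *end)
    {
        for (int32_t *rel = begin; rel < end; rel++) {
            /* Locate the word to patch, relative to the entry itself. */
            unsigned long *ptr = (unsigned long *)((char *)rel + *rel);

            /* Replace the kernel VA with the corresponding hyp VA. */
            *ptr = kern_va_to_hyp_va(*ptr);
        }
    }
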
diff --combined arch/arm64/kvm/hyp/nvhe/hyp-init.S
index b3915ccb23b0f46839a16126def9abf785457830,99b408fe09eed89237f41f0151aa537d5279b38f..3dbc0c649d363b080cccbf69d6e1c4a7d82bc330
@@@ -18,7 -18,7 +18,7 @@@
  #include <asm/virt.h>
  
        .text
-       .pushsection    .hyp.idmap.text, "ax"
+       .pushsection    .idmap.text, "ax"
  
        .align  11
  
@@@ -55,10 -55,17 +55,10 @@@ __do_hyp_init
        cmp     x0, #HVC_STUB_HCALL_NR
        b.lo    __kvm_handle_stub_hvc
  
 -      // We only actively check bits [24:31], and everything
 -      // else has to be zero, which we check at build time.
 -#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF)
 -#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value
 -#endif
 -
 -      ror     x0, x0, #24
 -      eor     x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF)
 -      ror     x0, x0, #4
 -      eor     x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF)
 -      cbz     x0, 1f
 +      mov     x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
 +      cmp     x0, x3
 +      b.eq    1f
 +
        mov     x0, #SMCCC_RET_NOT_SUPPORTED
        eret
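
In C terms the new dispatch is a direct equality test on the SMCCC
function ID, replacing the old ror/eor nibble checks (a sketch with
illustrative names, not the kernel's code):

    /* Sketch of the simplified check: cmp x0, x3 / b.eq 1f above. */
    static unsigned long check_hyp_init_call(unsigned long func_id,
                                             unsigned long hyp_init_id)
    {
        if (func_id != hyp_init_id)
            return SMCCC_RET_NOT_SUPPORTED;  /* eret with error */
        return 0;                            /* continue with EL2 init */
    }
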
  
@@@ -132,7 -139,6 +132,6 @@@ alternative_else_nop_endi
  
        /* Set the host vector */
        ldr     x0, =__kvm_hyp_host_vector
-       kimg_hyp_va x0, x1
        msr     vbar_el2, x0
  
        ret
@@@ -191,7 -197,6 +190,6 @@@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu
        /* Leave idmap. */
        mov     x0, x29
        ldr     x1, =kvm_host_psci_cpu_entry
-       kimg_hyp_va x1, x2
        br      x1
  SYM_CODE_END(__kvm_hyp_init_cpu)
  
diff --combined arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 8e7128cb76678f8697abc0bfe4a9e7c57a899afd,f254a425cb3aa3ed743d47e61dc4aa3d8006c11c..63de71c0481e7a04ef91b52b375563d8e8c4015a
@@@ -77,6 -77,12 +77,6 @@@ static unsigned long psci_forward(struc
                         cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
  }
  
 -static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
 -{
 -      psci_forward(host_ctxt);
 -      hyp_panic(); /* unreachable */
 -}
 -
  static unsigned int find_cpu_id(u64 mpidr)
  {
        unsigned int i;
@@@ -128,8 -134,8 +128,8 @@@ static int psci_cpu_on(u64 func_id, str
        if (cpu_id == INVALID_CPU_ID)
                return PSCI_RET_INVALID_PARAMS;
  
-       boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id);
-       init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id);
+       boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
+       init_params = per_cpu_ptr(&kvm_init_params, cpu_id);
  
        /* Check if the target CPU is already being booted. */
        if (!try_acquire_boot_args(boot_args))
        wmb();
  
        ret = psci_call(func_id, mpidr,
-                       __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)),
+                       __hyp_pa(&kvm_hyp_cpu_entry),
                        __hyp_pa(init_params));
  
        /* If successful, the lock will be released by the target CPU. */
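
The hyp_symbol_addr() wrapper being dropped in these hunks forced a
PC-relative address computation so hyp code never consumed an absolute
kernel VA for a symbol. With the relocations now fixed up at boot, a
plain `&sym` yields a usable hyp VA. A sketch of the kind of wrapper
removed (illustrative; the exact kernel macro may differ):

    /*
     * Sketch: force adrp/add generation so the address is computed
     * PC-relative at EL2 instead of being resolved to a kernel VA.
     */
    #define pc_relative_addr(s)                                 \
        ({                                                      \
            typeof(&(s)) __addr;                                \
            asm("adrp  %0, %1\n"                                \
                "add   %0, %0, :lo12:%1\n"                      \
                : "=r" (__addr) : "S" (&(s)));                  \
            __addr;                                             \
        })
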
@@@ -159,8 -165,8 +159,8 @@@ static int psci_cpu_suspend(u64 func_id
        struct psci_boot_args *boot_args;
        struct kvm_nvhe_init_params *init_params;
  
-       boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
-       init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+       boot_args = this_cpu_ptr(&suspend_args);
+       init_params = this_cpu_ptr(&kvm_init_params);
  
        /*
         * No need to acquire a lock before writing to boot_args because a core
         * point if it is a deep sleep state.
         */
        return psci_call(func_id, power_state,
-                        __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+                        __hyp_pa(&kvm_hyp_cpu_resume),
                         __hyp_pa(init_params));
  }
  
@@@ -186,8 -192,8 +186,8 @@@ static int psci_system_suspend(u64 func
        struct psci_boot_args *boot_args;
        struct kvm_nvhe_init_params *init_params;
  
-       boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
-       init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
+       boot_args = this_cpu_ptr(&suspend_args);
+       init_params = this_cpu_ptr(&kvm_init_params);
  
        /*
         * No need to acquire a lock before writing to boot_args because a core
  
        /* Will only return on error. */
        return psci_call(func_id,
-                        __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
+                        __hyp_pa(&kvm_hyp_cpu_resume),
                         __hyp_pa(init_params), 0);
  }
  
@@@ -207,12 -213,12 +207,12 @@@ asmlinkage void __noreturn kvm_host_psc
        struct psci_boot_args *boot_args;
        struct kvm_cpu_context *host_ctxt;
  
-       host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt;
+       host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
  
        if (is_cpu_on)
-               boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args));
+               boot_args = this_cpu_ptr(&cpu_on_args);
        else
-               boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
+               boot_args = this_cpu_ptr(&suspend_args);
  
        cpu_reg(host_ctxt, 0) = boot_args->r0;
        write_sysreg_el2(boot_args->pc, SYS_ELR);
@@@ -245,13 -251,10 +245,13 @@@ static unsigned long psci_0_2_handler(u
        case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
                return psci_forward(host_ctxt);
 +      /*
 +       * SYSTEM_OFF/RESET should not return according to the spec.
 +       * Allow it so as to stay robust to broken firmware.
 +       */
        case PSCI_0_2_FN_SYSTEM_OFF:
        case PSCI_0_2_FN_SYSTEM_RESET:
 -              psci_forward_noreturn(host_ctxt);
 -              unreachable();
 +              return psci_forward(host_ctxt);
        case PSCI_0_2_FN64_CPU_SUSPEND:
                return psci_cpu_suspend(func_id, host_ctxt);
        case PSCI_0_2_FN64_CPU_ON: