Merge branch 'kvm-guest-sev-migration' into kvm-master
author Paolo Bonzini <[email protected]>
Thu, 11 Nov 2021 12:40:26 +0000 (07:40 -0500)
committer Paolo Bonzini <[email protected]>
Thu, 11 Nov 2021 12:40:26 +0000 (07:40 -0500)
Add guest API and guest kernel support for SEV live migration.

Introduces a new hypercall to notify the host of changes to the page
encryption status.  If the page is encrypted then it must be migrated
through the SEV firmware or a helper VM sharing the key.  If the page is
not encrypted then it can be migrated normally by userspace.  This new
hypercall is invoked using paravirt_ops.

Conflicts: sev_active() replaced by cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT).
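
As a quick illustration of the mechanism described above (an editorial
sketch, not code from this merge): the guest wires the
mmu.notify_page_enc_status_changed paravirt hook to a helper that issues
the KVM_HC_MAP_GPA_RANGE hypercall.  The stand-alone C sketch below stubs
the hypercall with a printf so it runs in user space; stub_hypercall3()
and page_enc_status_changed() are hypothetical stand-ins for
kvm_sev_hypercall3() and kvm_sev_hc_page_enc_status(), and the flag
encodings mirror the uapi definitions added by this series.

/*
 * User-space sketch of the guest-side notification flow: pack the
 * page-size and encryption-status attributes and "issue" the
 * KVM_HC_MAP_GPA_RANGE hypercall.  A real guest executes VMMCALL here.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT                    12
#define KVM_HC_MAP_GPA_RANGE          12           /* hypercall number */
#define KVM_MAP_GPA_RANGE_PAGE_SZ_4K  0
#define KVM_MAP_GPA_RANGE_ENC_STAT(n) ((unsigned long)(n) << 4)

/* Hypothetical stand-in for kvm_sev_hypercall3(). */
static long stub_hypercall3(unsigned long nr, unsigned long gpa,
			    unsigned long npages, unsigned long attrs)
{
	printf("hypercall %lu: gpa=%#lx npages=%lu attrs=%#lx\n",
	       nr, gpa, npages, attrs);
	return 0;
}

/* Mirrors kvm_sev_hc_page_enc_status() from this merge. */
static void page_enc_status_changed(unsigned long pfn, int npages, bool enc)
{
	stub_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
			KVM_MAP_GPA_RANGE_ENC_STAT(enc) |
			KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
}

int main(void)
{
	/* Tell the host 16 pages at pfn 0x1000 are now shared (decrypted). */
	page_enc_status_changed(0x1000, 16, false);
	return 0;
}

Built with any C compiler, this prints the GPA, page count, and attribute
word that the real hook passes to the host in registers.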

arch/x86/include/asm/mem_encrypt.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/kernel/kvm.c
arch/x86/kernel/paravirt.c
arch/x86/mm/mem_encrypt.c
arch/x86/mm/pat/set_memory.c

index 2d4f5c17d79cc563cd134cdd403a8a811ef38b83,8dd373cc8b661ae4c446056211276f45c69a3d1f..e2c6f433ed100b0b131b8cb8008fa659fb9b97d4
@@@ -13,7 -13,6 +13,7 @@@
  #ifndef __ASSEMBLY__
  
  #include <linux/init.h>
 +#include <linux/cc_platform.h>
  
  #include <asm/bootparam.h>
  
@@@ -44,6 -43,8 +44,8 @@@ void __init sme_enable(struct boot_para
  
  int __init early_set_memory_decrypted(unsigned long vaddr, unsigned long size);
  int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
+ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages,
+                                           bool enc);
  
  void __init mem_encrypt_free_decrypted_mem(void);
  
@@@ -51,6 -52,9 +53,6 @@@
  void __init mem_encrypt_init(void);
  
  void __init sev_es_init_vc_handling(void);
 -bool sme_active(void);
 -bool sev_active(void);
 -bool sev_es_active(void);
  
  #define __bss_decrypted __section(".bss..decrypted")
  
@@@ -73,11 -77,16 +75,13 @@@ static inline void __init sme_encrypt_k
  static inline void __init sme_enable(struct boot_params *bp) { }
  
  static inline void sev_es_init_vc_handling(void) { }
 -static inline bool sme_active(void) { return false; }
 -static inline bool sev_active(void) { return false; }
 -static inline bool sev_es_active(void) { return false; }
  
  static inline int __init
  early_set_memory_decrypted(unsigned long vaddr, unsigned long size) { return 0; }
  static inline int __init
  early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; }
+ static inline void __init
+ early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc) {}
  
  static inline void mem_encrypt_free_decrypted_mem(void) { }
  
  
  extern char __start_bss_decrypted[], __end_bss_decrypted[], __start_bss_decrypted_unused[];
  
 -static inline bool mem_encrypt_active(void)
 -{
 -      return sme_me_mask;
 -}
 -
  static inline u64 sme_get_me_mask(void)
  {
        return sme_me_mask;
index cebec95a7124fdc245838e9081492533cb7e483e,540bf8cb37dbad8b314130c24d0eca2672985467..21c4a694ca114ecc66862b1efc52e3c6694c9f61
@@@ -52,11 -52,11 +52,11 @@@ void __init paravirt_set_cap(void)
  /* The paravirtualized I/O functions */
  static inline void slow_down_io(void)
  {
 -      pv_ops.cpu.io_delay();
 +      PVOP_VCALL0(cpu.io_delay);
  #ifdef REALLY_SLOW_IO
 -      pv_ops.cpu.io_delay();
 -      pv_ops.cpu.io_delay();
 -      pv_ops.cpu.io_delay();
 +      PVOP_VCALL0(cpu.io_delay);
 +      PVOP_VCALL0(cpu.io_delay);
 +      PVOP_VCALL0(cpu.io_delay);
  #endif
  }
  
@@@ -97,6 -97,12 +97,12 @@@ static inline void paravirt_arch_exit_m
        PVOP_VCALL1(mmu.exit_mmap, mm);
  }
  
+ static inline void notify_page_enc_status_changed(unsigned long pfn,
+                                                 int npages, bool enc)
+ {
+       PVOP_VCALL3(mmu.notify_page_enc_status_changed, pfn, npages, enc);
+ }
  #ifdef CONFIG_PARAVIRT_XXL
  static inline void load_sp0(unsigned long sp0)
  {
@@@ -113,12 -119,12 +119,12 @@@ static inline void __cpuid(unsigned in
  /*
   * These special macros can be used to get or set a debugging register
   */
 -static inline unsigned long paravirt_get_debugreg(int reg)
 +static __always_inline unsigned long paravirt_get_debugreg(int reg)
  {
        return PVOP_CALL1(unsigned long, cpu.get_debugreg, reg);
  }
  #define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
 -static inline void set_debugreg(unsigned long val, int reg)
 +static __always_inline void set_debugreg(unsigned long val, int reg)
  {
        PVOP_VCALL2(cpu.set_debugreg, reg, val);
  }
@@@ -133,14 -139,14 +139,14 @@@ static inline void write_cr0(unsigned l
        PVOP_VCALL1(cpu.write_cr0, x);
  }
  
 -static inline unsigned long read_cr2(void)
 +static __always_inline unsigned long read_cr2(void)
  {
        return PVOP_ALT_CALLEE0(unsigned long, mmu.read_cr2,
                                "mov %%cr2, %%rax;",
                                ALT_NOT(X86_FEATURE_XENPV));
  }
  
 -static inline void write_cr2(unsigned long x)
 +static __always_inline void write_cr2(unsigned long x)
  {
        PVOP_VCALL1(mmu.write_cr2, x);
  }
@@@ -653,10 -659,10 +659,10 @@@ bool __raw_callee_save___native_vcpu_is
   * functions.
   */
  #define PV_THUNK_NAME(func) "__raw_callee_save_" #func
 -#define PV_CALLEE_SAVE_REGS_THUNK(func)                                       \
 +#define __PV_CALLEE_SAVE_REGS_THUNK(func, section)                    \
        extern typeof(func) __raw_callee_save_##func;                   \
                                                                        \
 -      asm(".pushsection .text;"                                       \
 +      asm(".pushsection " section ", \"ax\";"                         \
            ".globl " PV_THUNK_NAME(func) ";"                           \
            ".type " PV_THUNK_NAME(func) ", @function;"                 \
            PV_THUNK_NAME(func) ":"                                     \
            ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
            ".popsection")
  
 +#define PV_CALLEE_SAVE_REGS_THUNK(func)                       \
 +      __PV_CALLEE_SAVE_REGS_THUNK(func, ".text")
 +
  /* Get a reference to a callee-save function */
  #define PV_CALLEE_SAVE(func)                                          \
        ((struct paravirt_callee_save) { __raw_callee_save_##func })
        ((struct paravirt_callee_save) { func })
  
  #ifdef CONFIG_PARAVIRT_XXL
 -static inline notrace unsigned long arch_local_save_flags(void)
 +static __always_inline unsigned long arch_local_save_flags(void)
  {
        return PVOP_ALT_CALLEE0(unsigned long, irq.save_fl, "pushf; pop %%rax;",
                                ALT_NOT(X86_FEATURE_XENPV));
  }
  
 -static inline notrace void arch_local_irq_disable(void)
 +static __always_inline void arch_local_irq_disable(void)
  {
        PVOP_ALT_VCALLEE0(irq.irq_disable, "cli;", ALT_NOT(X86_FEATURE_XENPV));
  }
  
 -static inline notrace void arch_local_irq_enable(void)
 +static __always_inline void arch_local_irq_enable(void)
  {
        PVOP_ALT_VCALLEE0(irq.irq_enable, "sti;", ALT_NOT(X86_FEATURE_XENPV));
  }
  
 -static inline notrace unsigned long arch_local_irq_save(void)
 +static __always_inline unsigned long arch_local_irq_save(void)
  {
        unsigned long f;
  
index fc1151e77569c0a9374870ddee5b3bd3cbfc95e1,66419982023920fa2b050480f09984dd910956b8..a69012e1903f1d6edab3cc56782fc0382955b64e
@@@ -168,6 -168,7 +168,7 @@@ struct pv_mmu_ops 
  
        /* Hook for intercepting the destruction of an mm_struct. */
        void (*exit_mmap)(struct mm_struct *mm);
+       void (*notify_page_enc_status_changed)(unsigned long pfn, int npages, bool enc);
  
  #ifdef CONFIG_PARAVIRT_XXL
        struct paravirt_callee_save read_cr2;
@@@ -577,9 -578,7 +578,9 @@@ void paravirt_leave_lazy_mmu(void)
  void paravirt_flush_lazy_mmu(void);
  
  void _paravirt_nop(void);
 +void paravirt_BUG(void);
  u64 _paravirt_ident_64(u64);
 +unsigned long paravirt_ret0(void);
  
  #define paravirt_nop  ((void *)_paravirt_nop)
  
diff --combined arch/x86/kernel/kvm.c
index 8863d1941f1bedff318aa234561701d3ef5c1031,3910419fae6cbcaed87e6fd2444693c51757f9a2..41e2965f64991e128bada0a4ba22a9ed75cde2ed
@@@ -27,7 -27,7 +27,8 @@@
  #include <linux/nmi.h>
  #include <linux/swait.h>
  #include <linux/syscore_ops.h>
 +#include <linux/cc_platform.h>
+ #include <linux/efi.h>
  #include <asm/timer.h>
  #include <asm/cpu.h>
  #include <asm/traps.h>
@@@ -41,6 -41,7 +42,7 @@@
  #include <asm/ptrace.h>
  #include <asm/reboot.h>
  #include <asm/svm.h>
+ #include <asm/e820/api.h>
  
  DEFINE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
  
@@@ -419,7 -420,7 +421,7 @@@ static void __init sev_map_percpu_data(
  {
        int cpu;
  
 -      if (!sev_active())
 +      if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;
  
        for_each_possible_cpu(cpu) {
@@@ -434,6 -435,8 +436,8 @@@ static void kvm_guest_cpu_offline(bool 
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+       if (kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
+               wrmsrl(MSR_KVM_MIGRATION_CONTROL, 0);
        kvm_pv_disable_apf();
        if (!shutdown)
                apf_task_wake_all();
@@@ -548,6 -551,55 +552,55 @@@ static void kvm_send_ipi_mask_allbutsel
        __send_ipi_mask(local_mask, vector);
  }
  
+ static int __init setup_efi_kvm_sev_migration(void)
+ {
+       efi_char16_t efi_sev_live_migration_enabled[] = L"SevLiveMigrationEnabled";
+       efi_guid_t efi_variable_guid = AMD_SEV_MEM_ENCRYPT_GUID;
+       efi_status_t status;
+       unsigned long size;
+       bool enabled;
+
 -      if (!sev_active() ||
++      if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) ||
+           !kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL))
+               return 0;
+       if (!efi_enabled(EFI_BOOT))
+               return 0;
+       if (!efi_enabled(EFI_RUNTIME_SERVICES)) {
+               pr_info("%s : EFI runtime services are not enabled\n", __func__);
+               return 0;
+       }
+       size = sizeof(enabled);
+       /* Get variable contents into buffer */
+       status = efi.get_variable(efi_sev_live_migration_enabled,
+                                 &efi_variable_guid, NULL, &size, &enabled);
+       if (status == EFI_NOT_FOUND) {
+               pr_info("%s : EFI live migration variable not found\n", __func__);
+               return 0;
+       }
+       if (status != EFI_SUCCESS) {
+               pr_info("%s : EFI variable retrieval failed\n", __func__);
+               return 0;
+       }
+       if (enabled == 0) {
+               pr_info("%s: live migration disabled in EFI\n", __func__);
+               return 0;
+       }
+       pr_info("%s : live migration enabled in EFI\n", __func__);
+       wrmsrl(MSR_KVM_MIGRATION_CONTROL, KVM_MIGRATION_READY);
+       return 1;
+ }
+ late_initcall(setup_efi_kvm_sev_migration);
  /*
   * Set the IPI entry points
   */
@@@ -806,8 -858,62 +859,62 @@@ static bool __init kvm_msi_ext_dest_id(
        return kvm_para_has_feature(KVM_FEATURE_MSI_EXT_DEST_ID);
  }
  
+ static void kvm_sev_hc_page_enc_status(unsigned long pfn, int npages, bool enc)
+ {
+       kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, pfn << PAGE_SHIFT, npages,
+                          KVM_MAP_GPA_RANGE_ENC_STAT(enc) | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
+ }
  static void __init kvm_init_platform(void)
  {
 -      if (sev_active() &&
++      if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+           kvm_para_has_feature(KVM_FEATURE_MIGRATION_CONTROL)) {
+               unsigned long nr_pages;
+               int i;
+               pv_ops.mmu.notify_page_enc_status_changed =
+                       kvm_sev_hc_page_enc_status;
+               /*
+                * Reset the host's shared pages list related to kernel
+                * specific page encryption status settings before we load a
+                * new kernel by kexec. Reset the page encryption status
+                * during early boot instead of just before kexec to avoid SMP
+                * races during kvm_pv_guest_cpu_reboot().
+                * NOTE: We cannot reset the complete shared pages list
+                * here as we need to retain the UEFI/OVMF firmware
+                * specific settings.
+                */
+               for (i = 0; i < e820_table->nr_entries; i++) {
+                       struct e820_entry *entry = &e820_table->entries[i];
+                       if (entry->type != E820_TYPE_RAM)
+                               continue;
+                       nr_pages = DIV_ROUND_UP(entry->size, PAGE_SIZE);
+                       kvm_sev_hypercall3(KVM_HC_MAP_GPA_RANGE, entry->addr,
+                                      nr_pages,
+                                      KVM_MAP_GPA_RANGE_ENCRYPTED | KVM_MAP_GPA_RANGE_PAGE_SZ_4K);
+               }
+               /*
+                * Ensure that _bss_decrypted section is marked as decrypted in the
+                * shared pages list.
+                */
+               nr_pages = DIV_ROUND_UP(__end_bss_decrypted - __start_bss_decrypted,
+                                       PAGE_SIZE);
+               early_set_mem_enc_dec_hypercall((unsigned long)__start_bss_decrypted,
+                                               nr_pages, 0);
+               /*
+                * If not booted using EFI, enable Live migration support.
+                */
+               if (!efi_enabled(EFI_BOOT))
+                       wrmsrl(MSR_KVM_MIGRATION_CONTROL,
+                              KVM_MIGRATION_READY);
+       }
        kvmclock_init();
        x86_platform.apic_post_init = kvm_apic_init;
  }
index 7157c2df3bc2aba06d3a6e11210f678bd9138861,1cc20ac9a54ffe8b350873f5cfb13636388ec667..7f7636aac62091bdeaa38a0b0fa3ce2f0f7d43f5
@@@ -46,17 -46,6 +46,17 @@@ asm (".pushsection .entry.text, \"ax\"\
       ".type _paravirt_nop, @function\n\t"
       ".popsection");
  
 +/* stub always returning 0. */
 +asm (".pushsection .entry.text, \"ax\"\n"
 +     ".global paravirt_ret0\n"
 +     "paravirt_ret0:\n\t"
 +     "xor %" _ASM_AX ", %" _ASM_AX ";\n\t"
 +     "ret\n\t"
 +     ".size paravirt_ret0, . - paravirt_ret0\n\t"
 +     ".type paravirt_ret0, @function\n\t"
 +     ".popsection");
 +
 +
  void __init default_banner(void)
  {
        printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
@@@ -64,7 -53,7 +64,7 @@@
  }
  
  /* Undefined instruction for dealing with missing ops pointers. */
 -static void paravirt_BUG(void)
 +noinstr void paravirt_BUG(void)
  {
        BUG();
  }
@@@ -229,36 -218,6 +229,36 @@@ void paravirt_end_context_switch(struc
        if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
                arch_enter_lazy_mmu_mode();
  }
 +
 +static noinstr unsigned long pv_native_read_cr2(void)
 +{
 +      return native_read_cr2();
 +}
 +
 +static noinstr void pv_native_write_cr2(unsigned long val)
 +{
 +      native_write_cr2(val);
 +}
 +
 +static noinstr unsigned long pv_native_get_debugreg(int regno)
 +{
 +      return native_get_debugreg(regno);
 +}
 +
 +static noinstr void pv_native_set_debugreg(int regno, unsigned long val)
 +{
 +      native_set_debugreg(regno, val);
 +}
 +
 +static noinstr void pv_native_irq_enable(void)
 +{
 +      native_irq_enable();
 +}
 +
 +static noinstr void pv_native_irq_disable(void)
 +{
 +      native_irq_disable();
 +}
  #endif
  
  enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
@@@ -285,8 -244,8 +285,8 @@@ struct paravirt_patch_template pv_ops 
  
  #ifdef CONFIG_PARAVIRT_XXL
        .cpu.cpuid              = native_cpuid,
 -      .cpu.get_debugreg       = native_get_debugreg,
 -      .cpu.set_debugreg       = native_set_debugreg,
 +      .cpu.get_debugreg       = pv_native_get_debugreg,
 +      .cpu.set_debugreg       = pv_native_set_debugreg,
        .cpu.read_cr0           = native_read_cr0,
        .cpu.write_cr0          = native_write_cr0,
        .cpu.write_cr4          = native_write_cr4,
  
        /* Irq ops. */
        .irq.save_fl            = __PV_IS_CALLEE_SAVE(native_save_fl),
 -      .irq.irq_disable        = __PV_IS_CALLEE_SAVE(native_irq_disable),
 -      .irq.irq_enable         = __PV_IS_CALLEE_SAVE(native_irq_enable),
 +      .irq.irq_disable        = __PV_IS_CALLEE_SAVE(pv_native_irq_disable),
 +      .irq.irq_enable         = __PV_IS_CALLEE_SAVE(pv_native_irq_enable),
        .irq.safe_halt          = native_safe_halt,
        .irq.halt               = native_halt,
  #endif /* CONFIG_PARAVIRT_XXL */
                        (void (*)(struct mmu_gather *, void *))tlb_remove_page,
  
        .mmu.exit_mmap          = paravirt_nop,
+       .mmu.notify_page_enc_status_changed     = paravirt_nop,
  
  #ifdef CONFIG_PARAVIRT_XXL
 -      .mmu.read_cr2           = __PV_IS_CALLEE_SAVE(native_read_cr2),
 -      .mmu.write_cr2          = native_write_cr2,
 +      .mmu.read_cr2           = __PV_IS_CALLEE_SAVE(pv_native_read_cr2),
 +      .mmu.write_cr2          = pv_native_write_cr2,
        .mmu.read_cr3           = __native_read_cr3,
        .mmu.write_cr3          = native_write_cr3,
  
  };
  
  #ifdef CONFIG_PARAVIRT_XXL
 -/* At this point, native_get/set_debugreg has real function entries */
 -NOKPROBE_SYMBOL(native_get_debugreg);
 -NOKPROBE_SYMBOL(native_set_debugreg);
  NOKPROBE_SYMBOL(native_load_idt);
  
  void (*paravirt_iret)(void) = native_iret;
index 23d54b810f0854a95546565c4a865beb58f01593,2673a89d17d9c52e6e929f56dfafad6e4121de01..35487305d8afec3bda45c6757abd312e59dfba36
@@@ -20,7 -20,6 +20,7 @@@
  #include <linux/bitops.h>
  #include <linux/dma-mapping.h>
  #include <linux/virtio_config.h>
 +#include <linux/cc_platform.h>
  
  #include <asm/tlbflush.h>
  #include <asm/fixmap.h>
@@@ -144,7 -143,7 +144,7 @@@ void __init sme_unmap_bootdata(char *re
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;
  
 -      if (!sme_active())
 +      if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                return;
  
        /* Get the command line address before unmapping the real_mode_data */
@@@ -164,7 -163,7 +164,7 @@@ void __init sme_map_bootdata(char *real
        struct boot_params *boot_data;
        unsigned long cmdline_paddr;
  
 -      if (!sme_active())
 +      if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
                return;
  
        __sme_early_map_unmap_mem(real_mode_data, sizeof(boot_params), true);
@@@ -194,7 -193,7 +194,7 @@@ void __init sme_early_init(void
        for (i = 0; i < ARRAY_SIZE(protection_map); i++)
                protection_map[i] = pgprot_encrypted(protection_map[i]);
  
 -      if (sev_active())
 +      if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                swiotlb_force = SWIOTLB_FORCE;
  }
  
@@@ -203,7 -202,7 +203,7 @@@ void __init sev_setup_arch(void
        phys_addr_t total_mem = memblock_phys_mem_size();
        unsigned long size;
  
 -      if (!sev_active())
 +      if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return;
  
        /*
        swiotlb_adjust_size(size);
  }
  
- static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+ static unsigned long pg_level_to_pfn(int level, pte_t *kpte, pgprot_t *ret_prot)
  {
-       pgprot_t old_prot, new_prot;
-       unsigned long pfn, pa, size;
-       pte_t new_pte;
+       unsigned long pfn = 0;
+       pgprot_t prot;
  
        switch (level) {
        case PG_LEVEL_4K:
                pfn = pte_pfn(*kpte);
-               old_prot = pte_pgprot(*kpte);
+               prot = pte_pgprot(*kpte);
                break;
        case PG_LEVEL_2M:
                pfn = pmd_pfn(*(pmd_t *)kpte);
-               old_prot = pmd_pgprot(*(pmd_t *)kpte);
+               prot = pmd_pgprot(*(pmd_t *)kpte);
                break;
        case PG_LEVEL_1G:
                pfn = pud_pfn(*(pud_t *)kpte);
-               old_prot = pud_pgprot(*(pud_t *)kpte);
+               prot = pud_pgprot(*(pud_t *)kpte);
                break;
        default:
-               return;
+               WARN_ONCE(1, "Invalid level for kpte\n");
+               return 0;
        }
  
+       if (ret_prot)
+               *ret_prot = prot;
+       return pfn;
+ }
+ void notify_range_enc_status_changed(unsigned long vaddr, int npages, bool enc)
+ {
+ #ifdef CONFIG_PARAVIRT
+       unsigned long sz = npages << PAGE_SHIFT;
+       unsigned long vaddr_end = vaddr + sz;
+       while (vaddr < vaddr_end) {
+               int psize, pmask, level;
+               unsigned long pfn;
+               pte_t *kpte;
+               kpte = lookup_address(vaddr, &level);
+               if (!kpte || pte_none(*kpte)) {
+                       WARN_ONCE(1, "kpte lookup for vaddr\n");
+                       return;
+               }
+               pfn = pg_level_to_pfn(level, kpte, NULL);
+               if (!pfn)
+                       continue;
+               psize = page_level_size(level);
+               pmask = page_level_mask(level);
+               notify_page_enc_status_changed(pfn, psize >> PAGE_SHIFT, enc);
+               vaddr = (vaddr & pmask) + psize;
+       }
+ #endif
+ }
+ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc)
+ {
+       pgprot_t old_prot, new_prot;
+       unsigned long pfn, pa, size;
+       pte_t new_pte;
+       pfn = pg_level_to_pfn(level, kpte, &old_prot);
+       if (!pfn)
+               return;
        new_prot = old_prot;
        if (enc)
                pgprot_val(new_prot) |= _PAGE_ENC;
  static int __init early_set_memory_enc_dec(unsigned long vaddr,
                                           unsigned long size, bool enc)
  {
-       unsigned long vaddr_end, vaddr_next;
+       unsigned long vaddr_end, vaddr_next, start;
        unsigned long psize, pmask;
        int split_page_size_mask;
        int level, ret;
        pte_t *kpte;
  
+       start = vaddr;
        vaddr_next = vaddr;
        vaddr_end = vaddr + size;
  
  
        ret = 0;
  
+       notify_range_enc_status_changed(start, PAGE_ALIGN(size) >> PAGE_SHIFT, enc);
  out:
        __flush_tlb_all();
        return ret;
@@@ -361,13 -409,47 +410,18 @@@ int __init early_set_memory_encrypted(u
        return early_set_memory_enc_dec(vaddr, size, true);
  }
  
 -/*
 - * SME and SEV are very similar but they are not the same, so there are
 - * times that the kernel will need to distinguish between SME and SEV. The
 - * sme_active() and sev_active() functions are used for this.  When a
 - * distinction isn't needed, the mem_encrypt_active() function can be used.
 - *
 - * The trampoline code is a good example for this requirement.  Before
 - * paging is activated, SME will access all memory as decrypted, but SEV
 - * will access all memory as encrypted.  So, when APs are being brought
 - * up under SME the trampoline area cannot be encrypted, whereas under SEV
 - * the trampoline area must be encrypted.
 - */
 -bool sev_active(void)
 -{
 -      return sev_status & MSR_AMD64_SEV_ENABLED;
 -}
 -
 -bool sme_active(void)
 -{
 -      return sme_me_mask && !sev_active();
 -}
 -EXPORT_SYMBOL_GPL(sev_active);
 -
 -/* Needs to be called from non-instrumentable code */
 -bool noinstr sev_es_active(void)
 -{
 -      return sev_status & MSR_AMD64_SEV_ES_ENABLED;
 -}
 -
+ void __init early_set_mem_enc_dec_hypercall(unsigned long vaddr, int npages, bool enc)
+ {
+       notify_range_enc_status_changed(vaddr, npages, enc);
+ }
  /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
  bool force_dma_unencrypted(struct device *dev)
  {
        /*
         * For SEV, all DMA must be to unencrypted addresses.
         */
 -      if (sev_active())
 +      if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                return true;
  
        /*
         * device does not support DMA to addresses that include the
         * encryption mask.
         */
 -      if (sme_active()) {
 +      if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
                u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
                                                dev->bus_dma_limit);
@@@ -400,7 -482,7 +454,7 @@@ void __init mem_encrypt_free_decrypted_
         * The unused memory range was mapped decrypted, change the encryption
         * attribute from decrypted to encrypted before freeing it.
         */
 -      if (mem_encrypt_active()) {
 +      if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
                r = set_memory_encrypted(vaddr, npages);
                if (r) {
                        pr_warn("failed to free unused decrypted pages\n");
@@@ -416,7 -498,7 +470,7 @@@ static void print_mem_encrypt_feature_i
        pr_info("AMD Memory Encryption Features active:");
  
        /* Secure Memory Encryption */
 -      if (sme_active()) {
 +      if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
                /*
                 * SME is mutually exclusive with any of the SEV
                 * features below.
        }
  
        /* Secure Encrypted Virtualization */
 -      if (sev_active())
 +      if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
                pr_cont(" SEV");
  
        /* Encrypted Register State */
 -      if (sev_es_active())
 +      if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
                pr_cont(" SEV-ES");
  
        pr_cont("\n");
@@@ -449,8 -531,7 +503,8 @@@ void __init mem_encrypt_init(void
         * With SEV, we need to unroll the rep string I/O instructions,
         * but SEV-ES supports them through the #VC handler.
         */
 -      if (sev_active() && !sev_es_active())
 +      if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
 +          !cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
                static_branch_enable(&sev_enable_key);
  
        print_mem_encrypt_feature_info();
  
  int arch_has_restricted_virtio_memory_access(void)
  {
 -      return sev_active();
 +      return cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT);
  }
  EXPORT_SYMBOL_GPL(arch_has_restricted_virtio_memory_access);
index 934dc5b2df3617576e777afeac4e7fae09c3d1d1,4f0cd505f9246ec1241cd84103afa508420db08b..b4072115c8ef685b307a2b4a5fe4fe0c6331caec
@@@ -18,7 -18,6 +18,7 @@@
  #include <linux/libnvdimm.h>
  #include <linux/vmstat.h>
  #include <linux/kernel.h>
 +#include <linux/cc_platform.h>
  
  #include <asm/e820/api.h>
  #include <asm/processor.h>
@@@ -30,8 -29,6 +30,8 @@@
  #include <asm/proto.h>
  #include <asm/memtype.h>
  #include <asm/set_memory.h>
 +#include <asm/hyperv-tlfs.h>
 +#include <asm/mshyperv.h>
  
  #include "../mm_internal.h"
  
@@@ -1983,15 -1980,15 +1983,15 @@@ int set_memory_global(unsigned long add
                                    __pgprot(_PAGE_GLOBAL), 0);
  }
  
 -static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 +/*
 + * __set_memory_enc_pgtable() is used for the hypervisors that get
 + * informed about "encryption" status via page tables.
 + */
 +static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
  {
        struct cpa_data cpa;
        int ret;
  
 -      /* Nothing to do if memory encryption is not active */
 -      if (!mem_encrypt_active())
 -              return 0;
 -
        /* Should not be working on unaligned addresses */
        if (WARN_ONCE(addr & ~PAGE_MASK, "misaligned address: %#lx\n", addr))
                addr &= PAGE_MASK;
         */
        cpa_flush(&cpa, 0);
  
+       /*
+        * Notify hypervisor that a given memory range is mapped encrypted
+        * or decrypted.
+        */
+       notify_range_enc_status_changed(addr, numpages, enc);
        return ret;
  }
  
 +static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
 +{
 +      if (hv_is_isolation_supported())
 +              return hv_set_mem_host_visibility(addr, numpages, !enc);
 +
 +      if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
 +              return __set_memory_enc_pgtable(addr, numpages, enc);
 +
 +      return 0;
 +}
 +
  int set_memory_encrypted(unsigned long addr, int numpages)
  {
        return __set_memory_enc_dec(addr, numpages, true);