Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
author    Linus Torvalds <[email protected]>
          Wed, 3 Jun 2020 22:13:47 +0000 (15:13 -0700)
committer Linus Torvalds <[email protected]>
          Wed, 3 Jun 2020 22:13:47 +0000 (15:13 -0700)
Pull kvm updates from Paolo Bonzini:
 "ARM:
   - Move the arch-specific code into arch/arm64/kvm

   - Start the post-32bit cleanup

   - Cherry-pick a few non-invasive pre-NV patches

  x86:
   - Rework of TLB flushing

   - Rework of event injection, especially with respect to nested
     virtualization

   - Nested AMD event injection facelift, building on the rework of
     generic code and fixing a lot of corner cases

   - Nested AMD live migration support

   - Optimization for TSC deadline MSR writes and IPIs

   - Various cleanups

   - Asynchronous page fault cleanups (from tglx, common topic branch
     with tip tree)

   - Interrupt-based delivery of asynchronous "page ready" events (host
     side)

   - Hyper-V MSRs and hypercalls for guest debugging

   - VMX preemption timer fixes

  s390:
   - Cleanups

  Generic:
   - switch vCPU thread wakeup from swait to rcuwait

  The other architectures, and the guest side of the asynchronous page
  fault work, will come next week"
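
[Editorial aside: the generic swait-to-rcuwait switch shows up concretely further down in the new arch/arm64/kvm/arm.c, with rcuwait_wake_up() in kvm_arm_resume_guest() and rcuwait_wait_event() in vcpu_req_sleep(). A minimal sketch of that calling pattern, assuming a kernel build context; example_wait, example_resume and the two functions are illustrative stand-ins, not KVM's real fields:

    #include <linux/rcuwait.h>
    #include <linux/sched.h>

    /* Illustrative stand-ins; KVM keeps the rcuwait per vCPU. */
    static struct rcuwait example_wait = { .task = NULL };
    static bool example_resume;

    static void example_sleep_side(void)
    {
            /* Blocks until the condition becomes true; compare
             * vcpu_req_sleep() in the new arch/arm64/kvm/arm.c below. */
            rcuwait_wait_event(&example_wait, READ_ONCE(example_resume),
                               TASK_INTERRUPTIBLE);
    }

    static void example_wake_side(void)
    {
            WRITE_ONCE(example_resume, true);
            /* Waker side runs under RCU; no waitqueue lock is taken. */
            rcuwait_wake_up(&example_wait);
    }
]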

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (256 commits)
  KVM: selftests: fix rdtsc() for vmx_tsc_adjust_test
  KVM: check userspace_addr for all memslots
  KVM: selftests: update hyperv_cpuid with SynDBG tests
  x86/kvm/hyper-v: Add support for synthetic debugger via hypercalls
  x86/kvm/hyper-v: enable hypercalls regardless of hypercall page
  x86/kvm/hyper-v: Add support for synthetic debugger interface
  x86/hyper-v: Add synthetic debugger definitions
  KVM: selftests: VMX preemption timer migration test
  KVM: nVMX: Fix VMX preemption timer migration
  x86/kvm/hyper-v: Explicitly align hcall param for kvm_hyperv_exit
  KVM: x86/pmu: Support full width counting
  KVM: x86/pmu: Tweak kvm_pmu_get_msr to pass 'struct msr_data' in
  KVM: x86: announce KVM_FEATURE_ASYNC_PF_INT
  KVM: x86: acknowledgment mechanism for async pf page ready notifications
  KVM: x86: interrupt based APF 'page ready' event delivery
  KVM: introduce kvm_read_guest_offset_cached()
  KVM: rename kvm_arch_can_inject_async_page_present() to kvm_arch_can_dequeue_async_page_present()
  KVM: x86: extend struct kvm_vcpu_pv_apf_data with token info
  Revert "KVM: async_pf: Fix #DF due to inject "Page not Present" and "Page Ready" exceptions simultaneously"
  KVM: VMX: Replace zero-length array with flexible-array
  ...

28 files changed:
Documentation/virt/kvm/api.rst
MAINTAINERS
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/smp.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/switch.c
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/vgic/vgic-mmio-v3.c
arch/arm64/kvm/vgic/vgic.h
arch/mips/kvm/mips.c
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/include/asm/hyperv-tlfs.h
arch/x86/include/asm/kvm_host.h
arch/x86/kernel/traps.c
arch/x86/kvm/hyperv.c
arch/x86/kvm/mmu/mmu.c
arch/x86/mm/fault.c
include/asm-generic/hyperv-tlfs.h
include/linux/sched.h
include/uapi/linux/kvm.h
kernel/exit.c
kernel/locking/lockdep.c

index d2c1cbce1018de6656048c07847141a1d336440e,d280af5345dfdf972198691f42ae0405c89e5400..426f94582b7a1a58a21e7e703beb3bc052440f90
@@@ -2572,15 -2572,13 +2572,15 @@@ list in 4.68
  :Parameters: None
  :Returns: 0 on success, -1 on error
  
 -This signals to the host kernel that the specified guest is being paused by
 -userspace.  The host will set a flag in the pvclock structure that is checked
 -from the soft lockup watchdog.  The flag is part of the pvclock structure that
 -is shared between guest and host, specifically the second bit of the flags
 +This ioctl sets a flag accessible to the guest indicating that the specified
 +vCPU has been paused by the host userspace.
 +
 +The host will set a flag in the pvclock structure that is checked from the
 +soft lockup watchdog.  The flag is part of the pvclock structure that is
 +shared between guest and host, specifically the second bit of the flags
  field of the pvclock_vcpu_time_info structure.  It will be set exclusively by
  the host and read/cleared exclusively by the guest.  The guest operation of
 -checking and clearing the flag must an atomic operation so
 +checking and clearing the flag must be an atomic operation so
  load-link/store-conditional, or equivalent must be used.  There are two cases
  where the guest will clear the flag: when the soft lockup watchdog timer resets
  itself or when a soft lockup is detected.  This ioctl can be called any time
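
[Editorial aside on the pvclock paragraph in the hunk above: a minimal guest-side sketch of the atomic check-and-clear it describes, assuming the flag is bit 1 of the flags byte and using compiler atomic builtins; the struct and names are trimmed, illustrative stand-ins for pvclock_vcpu_time_info:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative subset: only the flags byte of the time info page. */
    struct pv_time_flags { uint8_t flags; };

    #define GUEST_STOPPED_FLAG (1u << 1)  /* "second bit of the flags field" */

    /* Atomically test and clear the bit, as the watchdog path must. */
    static bool check_and_clear_guest_stopped(struct pv_time_flags *ti)
    {
            uint8_t old = __atomic_fetch_and(&ti->flags,
                                             (uint8_t)~GUEST_STOPPED_FLAG,
                                             __ATOMIC_ACQ_REL);
            return old & GUEST_STOPPED_FLAG;
    }
]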
@@@ -4336,9 -4334,13 +4336,13 @@@ Errors
    #define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE 0x00000001
    #define KVM_STATE_NESTED_VMX_SMM_VMXON      0x00000002
  
+ #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001
    struct kvm_vmx_nested_state_hdr {
+       __u32 flags;
        __u64 vmxon_pa;
        __u64 vmcs12_pa;
+       __u64 preemption_timer_deadline;
  
        struct {
                __u16 flags;
@@@ -5068,10 -5070,13 +5072,13 @@@ EOI was received
                struct kvm_hyperv_exit {
    #define KVM_EXIT_HYPERV_SYNIC          1
    #define KVM_EXIT_HYPERV_HCALL          2
+   #define KVM_EXIT_HYPERV_SYNDBG         3
                        __u32 type;
+                       __u32 pad1;
                        union {
                                struct {
                                        __u32 msr;
+                                       __u32 pad2;
                                        __u64 control;
                                        __u64 evt_page;
                                        __u64 msg_page;
                                        __u64 result;
                                        __u64 params[2];
                                } hcall;
+                               struct {
+                                       __u32 msr;
+                                       __u32 pad2;
+                                       __u64 control;
+                                       __u64 status;
+                                       __u64 send_page;
+                                       __u64 recv_page;
+                                       __u64 pending_page;
+                               } syndbg;
                        } u;
                };
                /* KVM_EXIT_HYPERV */
@@@ -5097,6 -5111,12 +5113,12 @@@ Hyper-V SynIC state change. Notificatio
  event/message pages and to enable/disable SynIC messages/events processing
  in userspace.
  
+       - KVM_EXIT_HYPERV_SYNDBG -- synchronously notify user-space about
+ Hyper-V Synthetic debugger state change. Notification is used to either update
+ the pending_page location or to send a control command (send the buffer located
+ in send_page or recv a buffer to recv_page).
  ::
  
                /* KVM_EXIT_ARM_NISV */
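
[Editorial aside relating the new KVM_EXIT_HYPERV_SYNDBG type and syndbg union member above to userspace: a hedged sketch of how a VMM's run loop might inspect it, assuming uapi headers that already carry this series; the handling itself is a placeholder:

    #include <linux/kvm.h>
    #include <stdio.h>

    static void handle_hyperv_exit(struct kvm_run *run)
    {
            if (run->exit_reason != KVM_EXIT_HYPERV)
                    return;

            switch (run->hyperv.type) {
            case KVM_EXIT_HYPERV_SYNDBG:
                    /* pending_page/send_page/recv_page are guest addresses
                     * a real VMM would translate and service here. */
                    printf("syndbg msr=%#x control=%#llx status=%#llx\n",
                           run->hyperv.u.syndbg.msr,
                           (unsigned long long)run->hyperv.u.syndbg.control,
                           (unsigned long long)run->hyperv.u.syndbg.status);
                    break;
            default:
                    break;
            }
    }
]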
@@@ -5779,7 -5799,7 +5801,7 @@@ will be initialized to 1 when created
  dirty logging can be enabled gradually in small chunks on the first call
  to KVM_CLEAR_DIRTY_LOG.  KVM_DIRTY_LOG_INITIALLY_SET depends on
  KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (it is also only available on
- x86 for now).
+ x86 and arm64 for now).
  
  KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name
  KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that make
@@@ -5804,6 -5824,23 +5826,23 @@@ If present, this capability can be enab
  will allow the transition to secure guest mode.  Otherwise KVM will
  veto the transition.
  
+ 7.20 KVM_CAP_HALT_POLL
+ ----------------------
+
+ :Architectures: all
+ :Target: VM
+ :Parameters: args[0] is the maximum poll time in nanoseconds
+ :Returns: 0 on success; -1 on error
+
+ This capability overrides the kvm module parameter halt_poll_ns for the
+ target VM.
+
+ VCPU polling allows a VCPU to poll for wakeup events instead of immediately
+ scheduling during guest halts. The maximum time a VCPU can spend polling is
+ controlled by the kvm module parameter halt_poll_ns. This capability allows
+ the maximum halt time to be specified on a per-VM basis, effectively
+ overriding the module parameter for the target VM.
+
  8. Other capabilities.
  ======================
  
diff --combined MAINTAINERS
index 660f5326a3639d8cc0c2f72e8a68b4ff529146d2,6c5b928989ed70b88186cd9208623f569cd53264..71db61fc60c010cc38d502fe14e1963615e48638
@@@ -842,13 -842,6 +842,13 @@@ S:       Supporte
  T:    git git://people.freedesktop.org/~agd5f/linux
  F:    drivers/gpu/drm/amd/display/
  
 +AMD ENERGY DRIVER
 +M:    Naveen Krishna Chatradhi <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/hwmon/amd_energy.rst
 +F:    drivers/hwmon/amd_energy.c
 +
  AMD FAM15H PROCESSOR POWER MONITORING DRIVER
  M:    Huang Rui <[email protected]>
  L:    [email protected]
@@@ -899,11 -892,6 +899,11 @@@ F:       drivers/gpu/drm/amd/include/v9_struc
  F:    drivers/gpu/drm/amd/include/vi_structs.h
  F:    include/uapi/linux/kfd_ioctl.h
  
 +AMD SPI DRIVER
 +M:    Sanjay R Mehta <[email protected]>
 +S:    Maintained
 +F:    drivers/spi/spi-amd.c
 +
  AMD MP2 I2C DRIVER
  M:    Elie Morisse <[email protected]>
  M:    Nehal Shah <[email protected]>
@@@ -2237,7 -2225,6 +2237,7 @@@ F:      drivers/*/qcom
  F:    drivers/*/qcom/
  F:    drivers/bluetooth/btqcomsmd.c
  F:    drivers/clocksource/timer-qcom.c
 +F:    drivers/cpuidle/cpuidle-qcom-spm.c
  F:    drivers/extcon/extcon-qcom*
  F:    drivers/i2c/busses/i2c-qcom-geni.c
  F:    drivers/i2c/busses/i2c-qup.c
@@@ -3743,7 -3730,7 +3743,7 @@@ CACHEFILES: FS-CACHE BACKEND FOR CACHIN
  M:    David Howells <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
  S:    Supported
 -F:    Documentation/filesystems/caching/cachefiles.txt
 +F:    Documentation/filesystems/caching/cachefiles.rst
  F:    fs/cachefiles/
  
  CADENCE MIPI-CSI2 BRIDGES
@@@ -3909,15 -3896,6 +3909,15 @@@ S:    Supporte
  W:    https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
  F:    drivers/crypto/ccree/
  
 +CCTRNG ARM TRUSTZONE CRYPTOCELL TRUE RANDOM NUMBER GENERATOR (TRNG) DRIVER
 +M:    Hadar Gat <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    drivers/char/hw_random/cctrng.c
 +F:    drivers/char/hw_random/cctrng.h
 +F:    Documentation/devicetree/bindings/rng/arm-cctrng.txt
 +W:    https://developer.arm.com/products/system-ip/trustzone-cryptocell/cryptocell-700-family
 +
  CEC FRAMEWORK
  M:    Hans Verkuil <[email protected]>
  L:    [email protected]
@@@ -4220,7 -4198,7 +4220,7 @@@ M:      [email protected]
  L:    [email protected]
  S:    Maintained
  W:    http://www.coda.cs.cmu.edu/
 -F:    Documentation/filesystems/coda.txt
 +F:    Documentation/filesystems/coda.rst
  F:    fs/coda/
  F:    include/linux/coda*.h
  F:    include/uapi/linux/coda*.h
@@@ -5013,7 -4991,7 +5013,7 @@@ M:      Jan Kara <[email protected]
  R:    Amir Goldstein <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/filesystems/dnotify.txt
 +F:    Documentation/filesystems/dnotify.rst
  F:    fs/notify/dnotify/
  F:    include/linux/dnotify.h
  
@@@ -5027,7 -5005,7 +5027,7 @@@ W:      http://www.win.tue.nl/~aeb/partition
  DISKQUOTA
  M:    Jan Kara <[email protected]>
  S:    Maintained
 -F:    Documentation/filesystems/quota.txt
 +F:    Documentation/filesystems/quota.rst
  F:    fs/quota/
  F:    include/linux/quota*.h
  F:    include/uapi/linux/quota*.h
@@@ -5062,7 -5040,7 +5062,7 @@@ F:      drivers/dma-buf
  F:    include/linux/*fence.h
  F:    include/linux/dma-buf*
  F:    include/linux/dma-resv.h
 -K:    dma_(buf|fence|resv)
 +K:    \bdma_(?:buf|fence|resv)\b
  
  DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
  M:    Vinod Koul <[email protected]>
@@@ -5274,7 -5252,7 +5274,7 @@@ DRM DRIVER FOR ARM VERSATILE TFT PANEL
  M:    Linus Walleij <[email protected]>
  S:    Maintained
  T:    git git://anongit.freedesktop.org/drm/drm-misc
 -F:    Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.txt
 +F:    Documentation/devicetree/bindings/display/panel/arm,versatile-tft-panel.yaml
  F:    drivers/gpu/drm/panel/panel-arm-versatile.c
  
  DRM DRIVER FOR ASPEED BMC GFX
@@@ -5300,7 -5278,7 +5300,7 @@@ F:      drivers/gpu/drm/bochs
  DRM DRIVER FOR BOE HIMAX8279D PANELS
  M:    Jerry Han <[email protected]>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/display/panel/boe,himax8279d.txt
 +F:    Documentation/devicetree/bindings/display/panel/boe,himax8279d.yaml
  F:    drivers/gpu/drm/panel/panel-boe-himax8279d.c
  
  DRM DRIVER FOR FARADAY TVE200 TV ENCODER
@@@ -5318,7 -5296,7 +5318,7 @@@ F:      drivers/gpu/drm/panel/panel-feixin-k
  DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
  M:    Jagan Teki <[email protected]>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
 +F:    Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.yaml
  F:    drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
  
  DRM DRIVER FOR GRAIN MEDIA GM12U320 PROJECTORS
@@@ -5353,14 -5331,6 +5353,14 @@@ S:    Orphan / Obsolet
  F:    drivers/gpu/drm/i810/
  F:    include/uapi/drm/i810_drm.h
  
 +DRM DRIVER FOR LVDS PANELS
 +M:    Laurent Pinchart <[email protected]>
 +L:    [email protected]
 +T:    git git://anongit.freedesktop.org/drm/drm-misc
 +S:    Maintained
 +F:    drivers/gpu/drm/panel/panel-lvds.c
 +F:    Documentation/devicetree/bindings/display/panel/lvds.yaml
 +
  DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
  S:    Orphan / Obsolete
  F:    drivers/gpu/drm/mga/
@@@ -5409,7 -5379,7 +5409,7 @@@ F:      include/uapi/drm/nouveau_drm.
  DRM DRIVER FOR OLIMEX LCD-OLINUXINO PANELS
  M:    Stefan Mavrodiev <[email protected]>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.txt
 +F:    Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino.yaml
  F:    drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c
  
  DRM DRIVER FOR PERVASIVE DISPLAYS REPAPER PANELS
@@@ -5426,7 -5396,7 +5426,7 @@@ L:      [email protected]
  S:    Obsolete
  W:    https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
  T:    git git://anongit.freedesktop.org/drm/drm-misc
 -F:    drivers/gpu/drm/cirrus/
 +F:    drivers/gpu/drm/tiny/cirrus.c
  
  DRM DRIVER FOR QXL VIRTUAL GPU
  M:    Dave Airlie <[email protected]>
@@@ -5476,7 -5446,7 +5476,7 @@@ F:      drivers/gpu/drm/tiny/st7586.
  DRM DRIVER FOR SITRONIX ST7701 PANELS
  M:    Jagan Teki <[email protected]>
  S:    Maintained
 -F:    Documentation/devicetree/bindings/display/panel/sitronix,st7701.txt
 +F:    Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
  F:    drivers/gpu/drm/panel/panel-sitronix-st7701.c
  
  DRM DRIVER FOR SITRONIX ST7735R PANELS
@@@ -5537,10 -5507,10 +5537,10 @@@ F:   drivers/gpu/drm/vboxvideo
  
  DRM DRIVER FOR VMWARE VIRTUAL GPU
  M:    "VMware Graphics" <[email protected]>
 -M:    Thomas Hellstrom <thellstrom@vmware.com>
 +M:    Roland Scheidegger <sroland@vmware.com>
  L:    [email protected]
  S:    Supported
 -T:    git git://people.freedesktop.org/~thomash/linux
 +T:    git git://people.freedesktop.org/~sroland/linux
  F:    drivers/gpu/drm/vmwgfx/
  F:    include/uapi/drm/vmwgfx_drm.h
  
@@@ -6202,6 -6172,7 +6202,6 @@@ M:      Yash Shah <[email protected]
  L:    [email protected]
  S:    Supported
  F:    drivers/edac/sifive_edac.c
 -F:    drivers/soc/sifive_l2_cache.c
  
  EDAC-SKYLAKE
  M:    Tony Luck <[email protected]>
@@@ -6742,13 -6713,6 +6742,13 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/crypto/fsl-sec4.txt
  F:    drivers/crypto/caam/
  
 +FREESCALE COLDFIRE M5441X MMC DRIVER
 +M:    Angelo Dureghello <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/mmc/host/sdhci-esdhc-mcf.c
 +F:    include/linux/platform_data/mmc-esdhc-mcf.h
 +
  FREESCALE DIU FRAMEBUFFER DRIVER
  M:    Timur Tabi <[email protected]>
  L:    [email protected]
@@@ -7056,24 -7020,13 +7056,24 @@@ R:   Darren Hart <[email protected]
  L:    [email protected]
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
 -F:    Documentation/*futex*
 +F:    Documentation/locking/*futex*
  F:    include/asm-generic/futex.h
  F:    include/linux/futex.h
  F:    include/uapi/linux/futex.h
  F:    kernel/futex.c
  F:    tools/perf/bench/futex*
 -F:    tools/testing/selftests/futex/
 +F:    Documentation/locking/*futex*
 +
 +GATEWORKS SYSTEM CONTROLLER (GSC) DRIVER
 +M:    Tim Harvey <[email protected]>
 +M:    Robert Jones <[email protected]>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/mfd/gateworks-gsc.yaml
 +F:    drivers/mfd/gateworks-gsc.c
 +F:    include/linux/mfd/gsc.h
 +F:    Documentation/hwmon/gsc-hwmon.rst
 +F:    drivers/hwmon/gsc-hwmon.c
 +F:    include/linux/platform_data/gsc_hwmon.h
  
  GASKET DRIVER FRAMEWORK
  M:    Rob Springer <[email protected]>
@@@ -7543,7 -7496,7 +7543,7 @@@ L:      [email protected]
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/andersson/remoteproc.git hwspinlock-next
  F:    Documentation/devicetree/bindings/hwlock/
 -F:    Documentation/hwspinlock.txt
 +F:    Documentation/locking/hwspinlock.rst
  F:    drivers/hwspinlock/
  F:    include/linux/hwspinlock.h
  
@@@ -7776,9 -7729,7 +7776,9 @@@ L:      [email protected]
  S:    Maintained
  F:    Documentation/vm/hmm.rst
  F:    include/linux/hmm*
 +F:    lib/test_hmm*
  F:    mm/hmm*
 +F:    tools/testing/selftests/vm/*hmm*
  
  HOST AP DRIVER
  M:    Jouni Malinen <[email protected]>
@@@ -7878,7 -7829,7 +7878,7 @@@ T:      git git://linuxtv.org/media_tree.gi
  F:    drivers/media/platform/sti/hva
  
  HWPOISON MEMORY FAILURE HANDLING
 -M:    Naoya Horiguchi <n[email protected].nec.com>
 +M:    Naoya Horiguchi <naoya.horiguchi@nec.com>
  L:    [email protected]
  S:    Maintained
  F:    mm/hwpoison-inject.c
@@@ -7924,7 -7875,6 +7924,7 @@@ F:      drivers/pci/controller/pci-hyperv.
  F:    drivers/scsi/storvsc_drv.c
  F:    drivers/uio/uio_hv_generic.c
  F:    drivers/video/fbdev/hyperv_fb.c
 +F:    include/asm-generic/hyperv-tlfs.h
  F:    include/asm-generic/mshyperv.h
  F:    include/clocksource/hyperv_timer.h
  F:    include/linux/hyperv.h
@@@ -7991,7 -7941,7 +7991,7 @@@ F:      Documentation/i2c/busses/i2c-parport
  F:    drivers/i2c/busses/i2c-parport.c
  
  I2C SUBSYSTEM
 -M:    Wolfram Sang <wsa@the-dreams.de>
 +M:    Wolfram Sang <wsa@kernel.org>
  L:    [email protected]
  S:    Maintained
  W:    https://i2c.wiki.kernel.org/
  S:    Maintained
  F:    drivers/platform/x86/intel_atomisp2_pm.c
  
 +INTEL BROXTON PMC DRIVER
 +M:    Mika Westerberg <[email protected]>
 +M:    Zha Qipeng <[email protected]>
 +S:    Maintained
 +F:    drivers/mfd/intel_pmc_bxt.c
 +F:    include/linux/mfd/intel_pmc_bxt.h
 +
  INTEL C600 SERIES SAS CONTROLLER DRIVER
  M:    Intel SCU Linux support <[email protected]>
  M:    Artur Paszkiewicz <[email protected]>
@@@ -8755,13 -8698,6 +8755,13 @@@ F:    include/uapi/linux/mic_common.
  F:    include/uapi/linux/mic_ioctl.h
  F:    include/uapi/linux/scif_ioctl.h
  
 +INTEL P-Unit IPC DRIVER
 +M:    Zha Qipeng <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    arch/x86/include/asm/intel_punit_ipc.h
 +F:    drivers/platform/x86/intel_punit_ipc.c
 +
  INTEL PMC CORE DRIVER
  M:    Rajneesh Bhardwaj <[email protected]>
  M:    Vishwanath Somayaji <[email protected]>
  S:    Maintained
  F:    drivers/platform/x86/intel_pmc_core*
  
 -INTEL PMC/P-Unit IPC DRIVER
 -M:    Zha Qipeng<[email protected]>
 -L:    [email protected]
 -S:    Maintained
 -F:    arch/x86/include/asm/intel_pmc_ipc.h
 -F:    arch/x86/include/asm/intel_punit_ipc.h
 -F:    drivers/platform/x86/intel_pmc_ipc.c
 -F:    drivers/platform/x86/intel_punit_ipc.c
 -
  INTEL PMIC GPIO DRIVERS
  M:    Andy Shevchenko <[email protected]>
  S:    Maintained
@@@ -8807,12 -8752,6 +8807,12 @@@ S:    Supporte
  F:    drivers/infiniband/hw/i40iw/
  F:    include/uapi/rdma/i40iw-abi.h
  
 +INTEL SCU DRIVERS
 +M:    Mika Westerberg <[email protected]>
 +S:    Maintained
 +F:    arch/x86/include/asm/intel_scu_ipc.h
 +F:    drivers/platform/x86/intel_scu_*
 +
  INTEL SPEED SELECT TECHNOLOGY
  M:    Srinivas Pandruvada <[email protected]>
  L:    [email protected]
@@@ -8879,13 -8818,6 +8879,13 @@@ F:    Documentation/admin-guide/wimax/i240
  F:    drivers/net/wimax/i2400m/
  F:    include/uapi/linux/wimax/i2400m.h
  
 +INTEL WMI SLIM BOOTLOADER (SBL) FIRMWARE UPDATE DRIVER
 +M:    Jithu Joseph <[email protected]>
 +R:    Maurice Ma <[email protected]>
 +S:    Maintained
 +W:    https://slimbootloader.github.io/security/firmware-update.html
 +F:    drivers/platform/x86/intel-wmi-sbl-fw-update.c
 +
  INTEL WMI THUNDERBOLT FORCE POWER DRIVER
  M:    Mario Limonciello <[email protected]>
  S:    Maintained
@@@ -8971,7 -8903,7 +8971,7 @@@ M:      Corey Minyard <[email protected]
  L:    [email protected] (moderated for non-subscribers)
  S:    Supported
  W:    http://openipmi.sourceforge.net/
 -F:    Documentation/IPMI.txt
 +F:    Documentation/driver-api/ipmi.rst
  F:    Documentation/devicetree/bindings/ipmi/
  F:    drivers/char/ipmi/
  F:    include/linux/ipmi*
@@@ -9013,7 -8945,7 +9013,7 @@@ IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY
  M:    Marc Zyngier <[email protected]>
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 -F:    Documentation/IRQ-domain.txt
 +F:    Documentation/core-api/irq/irq-domain.rst
  F:    include/linux/irqdomain.h
  F:    kernel/irq/irqdomain.c
  F:    kernel/irq/msi.c
  S:    Maintained
  W:    http://lse.sourceforge.net/kdump/
  F:    Documentation/admin-guide/kdump/
 +F:    fs/proc/vmcore.c
 +F:    include/linux/crash_core.h
 +F:    include/linux/crash_dump.h
 +F:    include/uapi/linux/vmcore.h
 +F:    kernel/crash_*.c
  
  KEENE FM RADIO TRANSMITTER DRIVER
  M:    Hans Verkuil <[email protected]>
@@@ -9368,7 -9295,6 +9368,6 @@@ F:      arch/arm64/include/asm/kvm
  F:    arch/arm64/include/uapi/asm/kvm*
  F:    arch/arm64/kvm/
  F:    include/kvm/arm_*
- F:    virt/kvm/arm/
  
  KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips)
  L:    [email protected]
@@@ -9793,13 -9719,6 +9792,13 @@@ F:    drivers/lightnvm
  F:    include/linux/lightnvm.h
  F:    include/uapi/linux/lightnvm.h
  
 +LINEAR RANGES HELPERS
 +M:    Mark Brown <[email protected]>
 +R:    Matti Vaittinen <[email protected]>
 +F:    lib/linear_ranges.c
 +F:    lib/test_linear_ranges.c
 +F:    include/linux/linear_range.h
 +
  LINUX FOR POWER MACINTOSH
  M:    Benjamin Herrenschmidt <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    drivers/net/ethernet/mediatek/
  
 +MEDIATEK I2C CONTROLLER DRIVER
 +M:    Qii Wang <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/i2c/i2c-mt65xx.txt
 +F:    drivers/i2c/busses/i2c-mt65xx.c
 +
  MEDIATEK JPEG DRIVER
  M:    Rick Chang <[email protected]>
  M:    Bin Liu <[email protected]>
@@@ -11797,9 -11709,8 +11796,9 @@@ F:   net/core/drop_monitor.
  
  NETWORKING DRIVERS
  M:    "David S. Miller" <[email protected]>
 +M:    Jakub Kicinski <[email protected]>
  L:    [email protected]
 -S:    Odd Fixes
 +S:    Maintained
  W:    http://www.linuxfoundation.org/en/Net
  Q:    http://patchwork.ozlabs.org/project/netdev/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
@@@ -12842,7 -12753,7 +12841,7 @@@ M:   "James E.J. Bottomley" <James.Bottom
  M:    Helge Deller <[email protected]>
  L:    [email protected]
  S:    Maintained
 -W:    http://www.parisc-linux.org/
 +W:    https://parisc.wiki.kernel.org
  Q:    http://patchwork.kernel.org/project/linux-parisc/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
@@@ -13751,7 -13662,6 +13750,7 @@@ M:   Tony Luck <[email protected]
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/pstore
  F:    Documentation/admin-guide/ramoops.rst
 +F:    Documentation/admin-guide/pstore-blk.rst
  F:    Documentation/devicetree/bindings/reserved-memory/ramoops.txt
  F:    drivers/acpi/apei/erst.c
  F:    drivers/firmware/efi/efi-pstore.c
@@@ -14150,6 -14060,7 +14149,6 @@@ F:   drivers/net/wireless/quantenn
  RADEON and AMDGPU DRM DRIVERS
  M:    Alex Deucher <[email protected]>
  M:    Christian König <[email protected]>
 -M:    David (ChunMing) Zhou <[email protected]>
  L:    [email protected]
  S:    Supported
  T:    git git://people.freedesktop.org/~agd5f/linux
@@@ -14305,7 -14216,7 +14304,7 @@@ M:   Reinette Chatre <reinette.chatre@int
  L:    [email protected]
  S:    Supported
  F:    Documentation/x86/resctrl*
 -F:    arch/x86/include/asm/resctrl_sched.h
 +F:    arch/x86/include/asm/resctrl.h
  F:    arch/x86/kernel/cpu/resctrl/
  F:    tools/testing/selftests/resctrl/
  
@@@ -14712,7 -14623,6 +14711,7 @@@ S:   Supporte
  W:    http://www.ibm.com/developerworks/linux/linux390/
  F:    block/partitions/ibm.c
  F:    drivers/s390/block/dasd*
 +F:    include/linux/dasd_mod.h
  
  S390 IOMMU (PCI)
  M:    Gerald Schaefer <[email protected]>
@@@ -14723,7 -14633,6 +14722,7 @@@ F:   drivers/iommu/s390-iommu.
  
  S390 IUCV NETWORK LAYER
  M:    Julian Wiedmann <[email protected]>
 +M:    Karsten Graul <[email protected]>
  M:    Ursula Braun <[email protected]>
  L:    [email protected]
  S:    Supported
@@@ -14734,7 -14643,6 +14733,7 @@@ F:   net/iucv
  
  S390 NETWORK DRIVERS
  M:    Julian Wiedmann <[email protected]>
 +M:    Karsten Graul <[email protected]>
  M:    Ursula Braun <[email protected]>
  L:    [email protected]
  S:    Supported
@@@ -15555,15 -15463,6 +15554,15 @@@ M: Nicolas Pitre <[email protected]
  S:    Odd Fixes
  F:    drivers/net/ethernet/smsc/smc91x.*
  
 +SECURE MONITOR CALL(SMC) CALLING CONVENTION (SMCCC)
 +M:    Mark Rutland <[email protected]>
 +M:    Lorenzo Pieralisi <[email protected]>
 +M:    Sudeep Holla <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/firmware/smccc/
 +F:    include/linux/arm-smccc.h
 +
  SMIA AND SMIA++ IMAGE SENSOR DRIVER
  M:    Sakari Ailus <[email protected]>
  L:    [email protected]
@@@ -15740,7 -15639,7 +15739,7 @@@ F:   drivers/ssb
  F:    include/linux/ssb/
  
  SONY IMX214 SENSOR DRIVER
 -M:    Ricardo Ribalda <ri[email protected]>
 +M:    Ricardo Ribalda <ri[email protected]>
  L:    [email protected]
  S:    Maintained
  T:    git git://linuxtv.org/media_tree.git
@@@ -15980,7 -15879,7 +15979,7 @@@ M:   Jeremy Kerr <[email protected]
  L:    [email protected]
  S:    Supported
  W:    http://www.ibm.com/developerworks/power/cell/
 -F:    Documentation/filesystems/spufs.txt
 +F:    Documentation/filesystems/spufs/spufs.rst
  F:    arch/powerpc/platforms/cell/spufs/
  
  SQUASHFS FILE SYSTEM
@@@ -16727,7 -16626,7 +16726,7 @@@ S:   Maintaine
  F:    sound/soc/ti/
  
  TEXAS INSTRUMENTS' DAC7612 DAC DRIVER
 -M:    Ricardo Ribalda <ri[email protected]>
 +M:    Ricardo Ribalda <ri[email protected]>
  L:    [email protected]
  S:    Supported
  F:    Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
@@@ -18631,8 -18530,8 +18630,8 @@@ W:   http://xfs.org
  T:    git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
  F:    Documentation/ABI/testing/sysfs-fs-xfs
  F:    Documentation/admin-guide/xfs.rst
 -F:    Documentation/filesystems/xfs-delayed-logging-design.txt
 -F:    Documentation/filesystems/xfs-self-describing-metadata.txt
 +F:    Documentation/filesystems/xfs-delayed-logging-design.rst
 +F:    Documentation/filesystems/xfs-self-describing-metadata.rst
  F:    fs/xfs/
  F:    include/uapi/linux/dqblk_xfs.h
  F:    include/uapi/linux/fsmap.h
index 57c0afcf9dcf7b658ceb0a73b0f8f365cd2f4b4a,5dd236256f6497a06b7b7e29cebdd2fb2c582321..abbdf9703e2076b6da7a11bded2cd7c69fdf0d0d
@@@ -46,6 -46,9 +46,9 @@@
  #define KVM_REQ_RECORD_STEAL  KVM_ARCH_REQ(3)
  #define KVM_REQ_RELOAD_GICv4  KVM_ARCH_REQ(4)
  
+ #define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
+                                    KVM_DIRTY_LOG_INITIALLY_SET)
  DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
  
  extern unsigned int kvm_sve_max_vl;
@@@ -112,12 -115,8 +115,8 @@@ struct kvm_vcpu_fault_info 
        u64 disr_el1;           /* Deferred [SError] Status Register */
  };
  
- /*
-  * 0 is reserved as an invalid value.
-  * Order should be kept in sync with the save/restore code.
-  */
  enum vcpu_sysreg {
-       __INVALID_SYSREG__,
+       __INVALID_SYSREG__,   /* 0 is reserved as an invalid value */
        MPIDR_EL1,      /* MultiProcessor Affinity Register */
        CSSELR_EL1,     /* Cache Size Selection Register */
        SCTLR_EL1,      /* System Control Register */
@@@ -415,6 -414,8 +414,8 @@@ struct kvm_vm_stat 
  struct kvm_vcpu_stat {
        u64 halt_successful_poll;
        u64 halt_attempted_poll;
+       u64 halt_poll_success_ns;
+       u64 halt_poll_fail_ns;
        u64 halt_poll_invalid;
        u64 halt_wakeup;
        u64 hvc_exit_stat;
@@@ -530,39 -531,6 +531,6 @@@ static inline void kvm_init_host_cpu_co
        cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
  }
  
- void __kvm_enable_ssbs(void);
- static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
-                                      unsigned long hyp_stack_ptr,
-                                      unsigned long vector_ptr)
- {
-       /*
-        * Calculate the raw per-cpu offset without a translation from the
-        * kernel's mapping to the linear mapping, and store it in tpidr_el2
-        * so that we can use adr_l to access per-cpu variables in EL2.
-        */
-       u64 tpidr_el2 = ((u64)this_cpu_ptr(&kvm_host_data) -
-                        (u64)kvm_ksym_ref(kvm_host_data));
-       /*
-        * Call initialization code, and switch to the full blown HYP code.
-        * If the cpucaps haven't been finalized yet, something has gone very
-        * wrong, and hyp will crash and burn when it uses any
-        * cpus_have_const_cap() wrapper.
-        */
-       BUG_ON(!system_capabilities_finalized());
-       __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
-       /*
-        * Disabling SSBD on a non-VHE system requires us to enable SSBS
-        * at EL2.
-        */
-       if (!has_vhe() && this_cpu_has_cap(ARM64_SSBS) &&
-           arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
-               kvm_call_hyp(__kvm_enable_ssbs);
-       }
- }
  static inline bool kvm_arch_requires_vhe(void)
  {
        /*
        if (system_supports_sve())
                return true;
  
 -      /* Some implementations have defects that confine them to VHE */
 -      if (cpus_have_cap(ARM64_WORKAROUND_SPECULATIVE_AT_VHE))
 -              return true;
 -
        return false;
  }
  
@@@ -594,8 -566,6 +562,6 @@@ int kvm_arm_vcpu_arch_get_attr(struct k
  int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
                               struct kvm_device_attr *attr);
  
- static inline void __cpu_init_stage2(void) {}
  /* Guest/host FPSIMD coordination helpers */
  int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
  void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
@@@ -666,7 -636,7 +632,7 @@@ static inline int kvm_arm_have_ssbd(voi
  void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu);
  void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu);
  
 -void kvm_set_ipa_limit(void);
 +int kvm_set_ipa_limit(void);
  
  #define __KVM_HAVE_ARCH_VM_ALLOC
  struct kvm *kvm_arch_alloc_vm(void);
index 015883671ec34c841bd2aca5fe8d0d1ceab6d5de,4f67b0cdffe825341bbe0a9473616dc03ed91630..ce3080834bfaa02cdac350ee80861218513923e7
  #include <linux/compiler.h>
  #include <linux/kvm_host.h>
  #include <asm/alternative.h>
 -#include <asm/kvm_mmu.h>
  #include <asm/sysreg.h>
  
 -#define __hyp_text __section(.hyp.text) notrace
 +#define __hyp_text __section(.hyp.text) notrace __noscs
  
  #define read_sysreg_elx(r,nvh,vh)                                     \
        ({                                                              \
  
  int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
  
- void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
- void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
- void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
- void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
- void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
- void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
+ void __vgic_v3_save_state(struct vgic_v3_cpu_if *cpu_if);
+ void __vgic_v3_restore_state(struct vgic_v3_cpu_if *cpu_if);
+ void __vgic_v3_activate_traps(struct vgic_v3_cpu_if *cpu_if);
+ void __vgic_v3_deactivate_traps(struct vgic_v3_cpu_if *cpu_if);
+ void __vgic_v3_save_aprs(struct vgic_v3_cpu_if *cpu_if);
+ void __vgic_v3_restore_aprs(struct vgic_v3_cpu_if *cpu_if);
  int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu);
  
  void __timer_enable_traps(struct kvm_vcpu *vcpu);
@@@ -87,5 -88,22 +87,5 @@@ void deactivate_traps_vhe_put(void)
  u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
  void __noreturn __hyp_do_panic(unsigned long, ...);
  
 -/*
 - * Must be called from hyp code running at EL2 with an updated VTTBR
 - * and interrupts disabled.
 - */
 -static __always_inline void __hyp_text __load_guest_stage2(struct kvm *kvm)
 -{
 -      write_sysreg(kvm->arch.vtcr, vtcr_el2);
 -      write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
 -
 -      /*
 -       * ARM errata 1165522 and 1530923 require the actual execution of the
 -       * above before we can switch to the EL1/EL0 translation regime used by
 -       * the guest.
 -       */
 -      asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
 -}
 -
  #endif /* __ARM64_KVM_HYP_H__ */
  
index 85da6befe76e392a9933de4b9d6da17bdcb5de6a,53bd4d517a4d4ef4785113f60313394d204b51e1..324c8483d2b900eac9d07a7987a46e48f91d2540
@@@ -363,8 -363,6 +363,6 @@@ static inline void __kvm_flush_dcache_p
        }
  }
  
- #define kvm_virt_to_phys(x)           __pa_symbol(x)
  void kvm_set_way_flush(struct kvm_vcpu *vcpu);
  void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
  
@@@ -416,7 -414,7 +414,7 @@@ static inline unsigned int kvm_get_vmid
  {
        int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
  
 -      return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 +      return get_vmid_bits(reg);
  }
  
  /*
@@@ -473,7 -471,7 +471,7 @@@ static inline int kvm_write_guest_lock(
  extern void *__kvm_bp_vect_base;
  extern int __kvm_harden_el2_vector_slot;
  
- /*  This is only called on a VHE system */
+ /*  This is called on both VHE and !VHE systems */
  static inline void *kvm_get_hyp_vector(void)
  {
        struct bp_hardening_data *data = arm64_get_bp_hardening_data();
@@@ -604,22 -602,5 +602,22 @@@ static __always_inline u64 kvm_get_vttb
        return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
  }
  
 +/*
 + * Must be called from hyp code running at EL2 with an updated VTTBR
 + * and interrupts disabled.
 + */
 +static __always_inline void __load_guest_stage2(struct kvm *kvm)
 +{
 +      write_sysreg(kvm->arch.vtcr, vtcr_el2);
 +      write_sysreg(kvm_get_vttbr(kvm), vttbr_el2);
 +
 +      /*
 +       * ARM errata 1165522 and 1530923 require the actual execution of the
 +       * above before we can switch to the EL1/EL0 translation regime used by
 +       * the guest.
 +       */
 +      asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 +}
 +
  #endif /* __ASSEMBLY__ */
  #endif /* __ARM64_KVM_MMU_H__ */
index 3539d7092612760cba39c071e5b3f4417fc6e51f,a27e0cd731e918e3ee91f277dd9df0aa9099cbf3..0577e21422845fa3d5bbe5c5e8bd6fd784e8bbec
@@@ -33,10 -33,6 +33,10 @@@ int main(void
    DEFINE(TSK_TI_ADDR_LIMIT,   offsetof(struct task_struct, thread_info.addr_limit));
  #ifdef CONFIG_ARM64_SW_TTBR0_PAN
    DEFINE(TSK_TI_TTBR0,                offsetof(struct task_struct, thread_info.ttbr0));
 +#endif
 +#ifdef CONFIG_SHADOW_CALL_STACK
 +  DEFINE(TSK_TI_SCS_BASE,     offsetof(struct task_struct, thread_info.scs_base));
 +  DEFINE(TSK_TI_SCS_SP,               offsetof(struct task_struct, thread_info.scs_sp));
  #endif
    DEFINE(TSK_STACK,           offsetof(struct task_struct, stack));
  #ifdef CONFIG_STACKPROTECTOR
    BLANK();
    DEFINE(CPU_BOOT_STACK,      offsetof(struct secondary_data, stack));
    DEFINE(CPU_BOOT_TASK,               offsetof(struct secondary_data, task));
 -#ifdef CONFIG_ARM64_PTR_AUTH
 -  DEFINE(CPU_BOOT_PTRAUTH_KEY,        offsetof(struct secondary_data, ptrauth_key));
 -#endif
    BLANK();
- #ifdef CONFIG_KVM_ARM_HOST
+ #ifdef CONFIG_KVM
    DEFINE(VCPU_CONTEXT,                offsetof(struct kvm_vcpu, arch.ctxt));
    DEFINE(VCPU_FAULT_DISR,     offsetof(struct kvm_vcpu, arch.fault.disr_el1));
    DEFINE(VCPU_WORKAROUND_FLAGS,       offsetof(struct kvm_vcpu, arch.workaround_flags));
index b0ce6bf14f6a92c8746b008e9f3481e4ee0c162f,a102321fc8a23bf92686be5d4c13ddf654b91217..ad06d6802d2eecf82bb0e646c622a09c5e07ec43
@@@ -234,7 -234,7 +234,7 @@@ static int detect_harden_bp_fw(void
                smccc_end = NULL;
                break;
  
- #if IS_ENABLED(CONFIG_KVM_ARM_HOST)
+ #if IS_ENABLED(CONFIG_KVM)
        case SMCCC_CONDUIT_SMC:
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc;
@@@ -635,7 -635,7 +635,7 @@@ has_neoverse_n1_erratum_1542419(const s
        return is_midr_in_range(midr, &range) && has_dic;
  }
  
 -#if defined(CONFIG_HARDEN_EL2_VECTORS) || defined(CONFIG_ARM64_ERRATUM_1319367)
 +#if defined(CONFIG_HARDEN_EL2_VECTORS)
  
  static const struct midr_range ca57_a72[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
@@@ -757,16 -757,12 +757,16 @@@ static const struct arm64_cpu_capabilit
  };
  #endif
  
 -#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
 -static const struct midr_range erratum_speculative_at_vhe_list[] = {
 +#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
 +static const struct midr_range erratum_speculative_at_list[] = {
  #ifdef CONFIG_ARM64_ERRATUM_1165522
        /* Cortex A76 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
  #endif
 +#ifdef CONFIG_ARM64_ERRATUM_1319367
 +      MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
 +      MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
 +#endif
  #ifdef CONFIG_ARM64_ERRATUM_1530923
        /* Cortex A55 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
  const struct arm64_cpu_capabilities arm64_errata[] = {
  #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
 -              .desc = "ARM errata 826319, 827319, 824069, 819472",
 +              .desc = "ARM errata 826319, 827319, 824069, or 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
                .cpu_enable = cpu_enable_cache_maint_trap,
  #endif
  #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
        {
 -              .desc = "Qualcomm erratum 1009, ARM erratum 1286807",
 +              .desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
        },
  #endif
 -#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT_VHE
 +#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
        {
 -              .desc = "ARM errata 1165522, 1530923",
 -              .capability = ARM64_WORKAROUND_SPECULATIVE_AT_VHE,
 -              ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_vhe_list),
 +              .desc = "ARM errata 1165522, 1319367, or 1530923",
 +              .capability = ARM64_WORKAROUND_SPECULATIVE_AT,
 +              ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
        },
  #endif
  #ifdef CONFIG_ARM64_ERRATUM_1463225
                .matches = has_neoverse_n1_erratum_1542419,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
 -#endif
 -#ifdef CONFIG_ARM64_ERRATUM_1319367
 -      {
 -              .desc = "ARM erratum 1319367",
 -              .capability = ARM64_WORKAROUND_SPECULATIVE_AT_NVHE,
 -              ERRATA_MIDR_RANGE_LIST(ca57_a72),
 -      },
  #endif
        {
        }
diff --combined arch/arm64/kernel/smp.c
index 04b1ca0d7aba3957da7b100123eda635b6637f9f,0a3045d9f33f1f7333e645bcdb7f253d7eb4bba4..4b6f4999d06ac51884b7bf0be89c8730f35dae47
@@@ -65,7 -65,7 +65,7 @@@ EXPORT_PER_CPU_SYMBOL(cpu_number)
   */
  struct secondary_data secondary_data;
  /* Number of CPUs which aren't online, but looping in kernel text. */
 -int cpus_stuck_in_kernel;
 +static int cpus_stuck_in_kernel;
  
  enum ipi_msg_type {
        IPI_RESCHEDULE,
@@@ -114,6 -114,10 +114,6 @@@ int __cpu_up(unsigned int cpu, struct t
         */
        secondary_data.task = idle;
        secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
 -#if defined(CONFIG_ARM64_PTR_AUTH)
 -      secondary_data.ptrauth_key.apia.lo = idle->thread.keys_kernel.apia.lo;
 -      secondary_data.ptrauth_key.apia.hi = idle->thread.keys_kernel.apia.hi;
 -#endif
        update_cpu_boot_status(CPU_MMU_OFF);
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));
  
        pr_crit("CPU%u: failed to come online\n", cpu);
        secondary_data.task = NULL;
        secondary_data.stack = NULL;
 -#if defined(CONFIG_ARM64_PTR_AUTH)
 -      secondary_data.ptrauth_key.apia.lo = 0;
 -      secondary_data.ptrauth_key.apia.hi = 0;
 -#endif
        __flush_dcache_area(&secondary_data, sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (status == CPU_MMU_OFF)
                panic("CPU%u detected unsupported configuration\n", cpu);
        }
  
 -      return ret;
 +      return -EIO;
  }
  
  static void init_gic_priority_masking(void)
@@@ -422,7 -430,7 +422,7 @@@ static void __init hyp_mode_check(void
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
-       if (IS_ENABLED(CONFIG_KVM_ARM_HOST))
+       if (IS_ENABLED(CONFIG_KVM))
                kvm_compute_layout();
  }
  
diff --combined arch/arm64/kvm/arm.c
index 0000000000000000000000000000000000000000,b5282943b85490bb7fd76619cd31105d7562d273..7a57381c05e8fc046472f1e1cf1fdbe3d8fc2ac8
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1712 +1,1710 @@@
 -      kvm_set_ipa_limit();
 -
 -      return 0;
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+  * Author: Christoffer Dall <[email protected]>
+  */
+ #include <linux/bug.h>
+ #include <linux/cpu_pm.h>
+ #include <linux/errno.h>
+ #include <linux/err.h>
+ #include <linux/kvm_host.h>
+ #include <linux/list.h>
+ #include <linux/module.h>
+ #include <linux/vmalloc.h>
+ #include <linux/fs.h>
+ #include <linux/mman.h>
+ #include <linux/sched.h>
+ #include <linux/kvm.h>
+ #include <linux/kvm_irqfd.h>
+ #include <linux/irqbypass.h>
+ #include <linux/sched/stat.h>
+ #include <trace/events/kvm.h>
+ #define CREATE_TRACE_POINTS
+ #include "trace_arm.h"
+ #include <linux/uaccess.h>
+ #include <asm/ptrace.h>
+ #include <asm/mman.h>
+ #include <asm/tlbflush.h>
+ #include <asm/cacheflush.h>
+ #include <asm/cpufeature.h>
+ #include <asm/virt.h>
+ #include <asm/kvm_arm.h>
+ #include <asm/kvm_asm.h>
+ #include <asm/kvm_mmu.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_coproc.h>
+ #include <asm/sections.h>
+ #include <kvm/arm_hypercalls.h>
+ #include <kvm/arm_pmu.h>
+ #include <kvm/arm_psci.h>
+ #ifdef REQUIRES_VIRT
+ __asm__(".arch_extension      virt");
+ #endif
+ DEFINE_PER_CPU(kvm_host_data_t, kvm_host_data);
+ static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
+ /* The VMID used in the VTTBR */
+ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
+ static u32 kvm_next_vmid;
+ static DEFINE_SPINLOCK(kvm_vmid_lock);
+ static bool vgic_present;
+ static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
+ DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
+ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+ {
+       return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
+ }
+ int kvm_arch_hardware_setup(void *opaque)
+ {
+       return 0;
+ }
+ int kvm_arch_check_processor_compat(void *opaque)
+ {
+       return 0;
+ }
+ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
+                           struct kvm_enable_cap *cap)
+ {
+       int r;
+       if (cap->flags)
+               return -EINVAL;
+       switch (cap->cap) {
+       case KVM_CAP_ARM_NISV_TO_USER:
+               r = 0;
+               kvm->arch.return_nisv_io_abort_to_user = true;
+               break;
+       default:
+               r = -EINVAL;
+               break;
+       }
+       return r;
+ }
+ static int kvm_arm_default_max_vcpus(void)
+ {
+       return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
+ }
+ /**
+  * kvm_arch_init_vm - initializes a VM data structure
+  * @kvm:      pointer to the KVM struct
+  */
+ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+ {
+       int ret, cpu;
+       ret = kvm_arm_setup_stage2(kvm, type);
+       if (ret)
+               return ret;
+       kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
+       if (!kvm->arch.last_vcpu_ran)
+               return -ENOMEM;
+       for_each_possible_cpu(cpu)
+               *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;
+       ret = kvm_alloc_stage2_pgd(kvm);
+       if (ret)
+               goto out_fail_alloc;
+       ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
+       if (ret)
+               goto out_free_stage2_pgd;
+       kvm_vgic_early_init(kvm);
+       /* Mark the initial VMID generation invalid */
+       kvm->arch.vmid.vmid_gen = 0;
+       /* The maximum number of VCPUs is limited by the host's GIC model */
+       kvm->arch.max_vcpus = kvm_arm_default_max_vcpus();
+       return ret;
+ out_free_stage2_pgd:
+       kvm_free_stage2_pgd(kvm);
+ out_fail_alloc:
+       free_percpu(kvm->arch.last_vcpu_ran);
+       kvm->arch.last_vcpu_ran = NULL;
+       return ret;
+ }
+ int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
+ {
+       return 0;
+ }
+ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+ {
+       return VM_FAULT_SIGBUS;
+ }
+ /**
+  * kvm_arch_destroy_vm - destroy the VM data structure
+  * @kvm:      pointer to the KVM struct
+  */
+ void kvm_arch_destroy_vm(struct kvm *kvm)
+ {
+       int i;
+       kvm_vgic_destroy(kvm);
+       free_percpu(kvm->arch.last_vcpu_ran);
+       kvm->arch.last_vcpu_ran = NULL;
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               if (kvm->vcpus[i]) {
+                       kvm_vcpu_destroy(kvm->vcpus[i]);
+                       kvm->vcpus[i] = NULL;
+               }
+       }
+       atomic_set(&kvm->online_vcpus, 0);
+ }
+ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
+ {
+       int r;
+       switch (ext) {
+       case KVM_CAP_IRQCHIP:
+               r = vgic_present;
+               break;
+       case KVM_CAP_IOEVENTFD:
+       case KVM_CAP_DEVICE_CTRL:
+       case KVM_CAP_USER_MEMORY:
+       case KVM_CAP_SYNC_MMU:
+       case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
+       case KVM_CAP_ONE_REG:
+       case KVM_CAP_ARM_PSCI:
+       case KVM_CAP_ARM_PSCI_0_2:
+       case KVM_CAP_READONLY_MEM:
+       case KVM_CAP_MP_STATE:
+       case KVM_CAP_IMMEDIATE_EXIT:
+       case KVM_CAP_VCPU_EVENTS:
+       case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
+       case KVM_CAP_ARM_NISV_TO_USER:
+       case KVM_CAP_ARM_INJECT_EXT_DABT:
+               r = 1;
+               break;
+       case KVM_CAP_ARM_SET_DEVICE_ADDR:
+               r = 1;
+               break;
+       case KVM_CAP_NR_VCPUS:
+               r = num_online_cpus();
+               break;
+       case KVM_CAP_MAX_VCPUS:
+       case KVM_CAP_MAX_VCPU_ID:
+               if (kvm)
+                       r = kvm->arch.max_vcpus;
+               else
+                       r = kvm_arm_default_max_vcpus();
+               break;
+       case KVM_CAP_MSI_DEVID:
+               if (!kvm)
+                       r = -EINVAL;
+               else
+                       r = kvm->arch.vgic.msis_require_devid;
+               break;
+       case KVM_CAP_ARM_USER_IRQ:
+               /*
+                * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
+                * (bump this number if adding more devices)
+                */
+               r = 1;
+               break;
+       default:
+               r = kvm_arch_vm_ioctl_check_extension(kvm, ext);
+               break;
+       }
+       return r;
+ }
+ long kvm_arch_dev_ioctl(struct file *filp,
+                       unsigned int ioctl, unsigned long arg)
+ {
+       return -EINVAL;
+ }
+ struct kvm *kvm_arch_alloc_vm(void)
+ {
+       if (!has_vhe())
+               return kzalloc(sizeof(struct kvm), GFP_KERNEL);
+       return vzalloc(sizeof(struct kvm));
+ }
+ void kvm_arch_free_vm(struct kvm *kvm)
+ {
+       if (!has_vhe())
+               kfree(kvm);
+       else
+               vfree(kvm);
+ }
+ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
+ {
+       if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
+               return -EBUSY;
+       if (id >= kvm->arch.max_vcpus)
+               return -EINVAL;
+       return 0;
+ }
+ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+ {
+       int err;
+       /* Force users to call KVM_ARM_VCPU_INIT */
+       vcpu->arch.target = -1;
+       bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+       /* Set up the timer */
+       kvm_timer_vcpu_init(vcpu);
+       kvm_pmu_vcpu_init(vcpu);
+       kvm_arm_reset_debug_ptr(vcpu);
+       kvm_arm_pvtime_vcpu_init(&vcpu->arch);
+       err = kvm_vgic_vcpu_init(vcpu);
+       if (err)
+               return err;
+       return create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
+ }
+ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+ {
+ }
+ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+ {
+       if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm)))
+               static_branch_dec(&userspace_irqchip_in_use);
+       kvm_mmu_free_memory_caches(vcpu);
+       kvm_timer_vcpu_terminate(vcpu);
+       kvm_pmu_vcpu_destroy(vcpu);
+       kvm_arm_vcpu_destroy(vcpu);
+ }
+ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+ {
+       return kvm_timer_is_pending(vcpu);
+ }
+ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
+ {
+       /*
+        * If we're about to block (most likely because we've just hit a
+        * WFI), we need to sync back the state of the GIC CPU interface
+        * so that we have the latest PMR and group enables. This ensures
+        * that kvm_arch_vcpu_runnable has up-to-date data to decide
+        * whether we have pending interrupts.
+        *
+        * For the same reason, we want to tell GICv4 that we need
+        * doorbells to be signalled, should an interrupt become pending.
+        */
+       preempt_disable();
+       kvm_vgic_vmcr_sync(vcpu);
+       vgic_v4_put(vcpu, true);
+       preempt_enable();
+ }
+ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
+ {
+       preempt_disable();
+       vgic_v4_load(vcpu);
+       preempt_enable();
+ }
+ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+ {
+       int *last_ran;
+       kvm_host_data_t *cpu_data;
+       last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
+       cpu_data = this_cpu_ptr(&kvm_host_data);
+       /*
+        * We might get preempted before the vCPU actually runs, but
+        * over-invalidation doesn't affect correctness.
+        */
+       if (*last_ran != vcpu->vcpu_id) {
+               kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
+               *last_ran = vcpu->vcpu_id;
+       }
+       vcpu->cpu = cpu;
+       vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
+       kvm_vgic_load(vcpu);
+       kvm_timer_vcpu_load(vcpu);
+       kvm_vcpu_load_sysregs(vcpu);
+       kvm_arch_vcpu_load_fp(vcpu);
+       kvm_vcpu_pmu_restore_guest(vcpu);
+       if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
+               kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);
+       if (single_task_running())
+               vcpu_clear_wfx_traps(vcpu);
+       else
+               vcpu_set_wfx_traps(vcpu);
+       vcpu_ptrauth_setup_lazy(vcpu);
+ }
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+ {
+       kvm_arch_vcpu_put_fp(vcpu);
+       kvm_vcpu_put_sysregs(vcpu);
+       kvm_timer_vcpu_put(vcpu);
+       kvm_vgic_put(vcpu);
+       kvm_vcpu_pmu_restore_host(vcpu);
+       vcpu->cpu = -1;
+ }
+ static void vcpu_power_off(struct kvm_vcpu *vcpu)
+ {
+       vcpu->arch.power_off = true;
+       kvm_make_request(KVM_REQ_SLEEP, vcpu);
+       kvm_vcpu_kick(vcpu);
+ }
+ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+ {
+       if (vcpu->arch.power_off)
+               mp_state->mp_state = KVM_MP_STATE_STOPPED;
+       else
+               mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+       return 0;
+ }
+ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+ {
+       int ret = 0;
+       switch (mp_state->mp_state) {
+       case KVM_MP_STATE_RUNNABLE:
+               vcpu->arch.power_off = false;
+               break;
+       case KVM_MP_STATE_STOPPED:
+               vcpu_power_off(vcpu);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       return ret;
+ }
+ /**
+  * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
+  * @v:                The VCPU pointer
+  *
+  * If the guest CPU is not waiting for interrupts or an interrupt line is
+  * asserted, the CPU is by definition runnable.
+  */
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+ {
+       bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
+       return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
+               && !v->arch.power_off && !v->arch.pause);
+ }
+ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+ {
+       return vcpu_mode_priv(vcpu);
+ }
+ /* Just ensure a guest exit from a particular CPU */
+ static void exit_vm_noop(void *info)
+ {
+ }
+ void force_vm_exit(const cpumask_t *mask)
+ {
+       preempt_disable();
+       smp_call_function_many(mask, exit_vm_noop, NULL, true);
+       preempt_enable();
+ }
+ /**
+  * need_new_vmid_gen - check that the VMID is still valid
+  * @vmid: The VMID to check
+  *
+  * return true if there is a new generation of VMIDs being used
+  *
+  * The hardware supports a limited set of values with the value zero reserved
+  * for the host, so we check if an assigned value belongs to a previous
+  * generation, which requires us to assign a new value. If we're the first to
+  * use a VMID for the new generation, we must flush necessary caches and TLBs
+  * on all CPUs.
+  */
+ static bool need_new_vmid_gen(struct kvm_vmid *vmid)
+ {
+       u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
+       smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
+       return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
+ }
+ /**
+  * update_vmid - Update the vmid with a valid VMID for the current generation
+  * @kvm: The guest that struct vmid belongs to
+  * @vmid: The stage-2 VMID information struct
+  */
+ static void update_vmid(struct kvm_vmid *vmid)
+ {
+       if (!need_new_vmid_gen(vmid))
+               return;
+       spin_lock(&kvm_vmid_lock);
+       /*
+        * We need to re-check the vmid_gen here to ensure that if another vcpu
+        * already allocated a valid vmid for this vm, then this vcpu should
+        * use the same vmid.
+        */
+       if (!need_new_vmid_gen(vmid)) {
+               spin_unlock(&kvm_vmid_lock);
+               return;
+       }
+       /* First user of a new VMID generation? */
+       if (unlikely(kvm_next_vmid == 0)) {
+               atomic64_inc(&kvm_vmid_gen);
+               kvm_next_vmid = 1;
+               /*
+                * On SMP we know no other CPUs can use this CPU's or each
+                * other's VMID after force_vm_exit returns since the
+                * kvm_vmid_lock blocks them from reentry to the guest.
+                */
+               force_vm_exit(cpu_all_mask);
+               /*
+                * Now broadcast TLB + ICACHE invalidation over the inner
+                * shareable domain to make sure all data structures are
+                * clean.
+                */
+               kvm_call_hyp(__kvm_flush_vm_context);
+       }
+       vmid->vmid = kvm_next_vmid;
+       kvm_next_vmid++;
+       kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
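+       /*
+        * Note: once kvm_next_vmid wraps back to zero (e.g. after 255
+        * allocations with 8-bit VMIDs), the next caller sees it as zero,
+        * bumps kvm_vmid_gen and repeats the flush dance above.
+        */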
+       smp_wmb();
+       WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
+       spin_unlock(&kvm_vmid_lock);
+ }
+ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+ {
+       struct kvm *kvm = vcpu->kvm;
+       int ret = 0;
+       if (likely(vcpu->arch.has_run_once))
+               return 0;
+       if (!kvm_arm_vcpu_is_finalized(vcpu))
+               return -EPERM;
+       vcpu->arch.has_run_once = true;
+       if (likely(irqchip_in_kernel(kvm))) {
+               /*
+                * Map the VGIC hardware resources before running a vcpu the
+                * first time on this VM.
+                */
+               if (unlikely(!vgic_ready(kvm))) {
+                       ret = kvm_vgic_map_resources(kvm);
+                       if (ret)
+                               return ret;
+               }
+       } else {
+               /*
+                * Tell the rest of the code that there are userspace irqchip
+                * VMs in the wild.
+                */
+               static_branch_inc(&userspace_irqchip_in_use);
+       }
+       ret = kvm_timer_enable(vcpu);
+       if (ret)
+               return ret;
+       ret = kvm_arm_pmu_v3_enable(vcpu);
+       return ret;
+ }
+ bool kvm_arch_intc_initialized(struct kvm *kvm)
+ {
+       return vgic_initialized(kvm);
+ }
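+ /*
+  * Pause all vcpus of a guest: set the pause flag on each vcpu and make
+  * KVM_REQ_SLEEP pending so that running vcpus drop into vcpu_req_sleep()
+  * at their next request check.
+  */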
+ void kvm_arm_halt_guest(struct kvm *kvm)
+ {
+       int i;
+       struct kvm_vcpu *vcpu;
+       kvm_for_each_vcpu(i, vcpu, kvm)
+               vcpu->arch.pause = true;
+       kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
+ }
+ void kvm_arm_resume_guest(struct kvm *kvm)
+ {
+       int i;
+       struct kvm_vcpu *vcpu;
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               vcpu->arch.pause = false;
+               rcuwait_wake_up(kvm_arch_vcpu_get_wait(vcpu));
+       }
+ }
+ static void vcpu_req_sleep(struct kvm_vcpu *vcpu)
+ {
+       struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
+       rcuwait_wait_event(wait,
+                          (!vcpu->arch.power_off) && (!vcpu->arch.pause),
+                          TASK_INTERRUPTIBLE);
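+       /*
+        * rcuwait_wait_event() sleeps interruptibly until neither power_off
+        * nor pause is set; a pending signal can also wake us up, hence the
+        * re-check below.
+        */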
+       if (vcpu->arch.power_off || vcpu->arch.pause) {
+               /* Awaken to handle a signal, request we sleep again later. */
+               kvm_make_request(KVM_REQ_SLEEP, vcpu);
+       }
+       /*
+        * Make sure we will observe a potential reset request if we've
+        * observed a change to the power state. Pairs with the smp_wmb() in
+        * kvm_psci_vcpu_on().
+        */
+       smp_rmb();
+ }
+ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+ {
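+       /* target stays -1 until KVM_ARM_VCPU_INIT has configured the vcpu */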
+       return vcpu->arch.target >= 0;
+ }
+ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
+ {
+       if (kvm_request_pending(vcpu)) {
+               if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
+                       vcpu_req_sleep(vcpu);
+               if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+                       kvm_reset_vcpu(vcpu);
+               /*
+                * Clear IRQ_PENDING requests that were made to guarantee
+                * that a VCPU sees new virtual interrupts.
+                */
+               kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);
+               if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
+                       kvm_update_stolen_time(vcpu);
+               if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
+                       /* The distributor enable bits were changed */
+                       preempt_disable();
+                       vgic_v4_put(vcpu, false);
+                       vgic_v4_load(vcpu);
+                       preempt_enable();
+               }
+       }
+ }
+ /**
+  * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
+  * @vcpu:     The VCPU pointer
+  *
+  * This function is called through the VCPU_RUN ioctl from user space. It
+  * executes VM code in a loop until the time slice for the process is used
+  * up or some emulation is needed from user space, in which case it returns
+  * 0 with the kvm_run structure filled in with the data required for the
+  * requested emulation.
+  */
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_run *run = vcpu->run;
+       int ret;
+       if (unlikely(!kvm_vcpu_initialized(vcpu)))
+               return -ENOEXEC;
+       ret = kvm_vcpu_first_run_init(vcpu);
+       if (ret)
+               return ret;
+       if (run->exit_reason == KVM_EXIT_MMIO) {
+               ret = kvm_handle_mmio_return(vcpu, run);
+               if (ret)
+                       return ret;
+       }
+       if (run->immediate_exit)
+               return -EINTR;
+       vcpu_load(vcpu);
+       kvm_sigset_activate(vcpu);
+       ret = 1;
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       while (ret > 0) {
+               /*
+                * Check conditions before entering the guest
+                */
+               cond_resched();
+               update_vmid(&vcpu->kvm->arch.vmid);
+               check_vcpu_requests(vcpu);
+               /*
+                * Preparing the interrupts to be injected also
+                * involves poking the GIC, which must be done in a
+                * non-preemptible context.
+                */
+               preempt_disable();
+               kvm_pmu_flush_hwstate(vcpu);
+               local_irq_disable();
+               kvm_vgic_flush_hwstate(vcpu);
+               /*
+                * Exit if we have a signal pending so that we can deliver the
+                * signal to user space.
+                */
+               if (signal_pending(current)) {
+                       ret = -EINTR;
+                       run->exit_reason = KVM_EXIT_INTR;
+               }
+               /*
+                * If we're using a userspace irqchip, then check if we need
+                * to tell a userspace irqchip about timer or PMU level
+                * changes and if so, exit to userspace (the actual level
+                * state gets updated in kvm_timer_update_run and
+                * kvm_pmu_update_run below).
+                */
+               if (static_branch_unlikely(&userspace_irqchip_in_use)) {
+                       if (kvm_timer_should_notify_user(vcpu) ||
+                           kvm_pmu_should_notify_user(vcpu)) {
+                               ret = -EINTR;
+                               run->exit_reason = KVM_EXIT_INTR;
+                       }
+               }
+               /*
+                * Ensure we set mode to IN_GUEST_MODE after we disable
+                * interrupts and before the final VCPU requests check.
+                * See the comment in kvm_vcpu_exiting_guest_mode() and
+                * Documentation/virt/kvm/vcpu-requests.rst
+                */
+               smp_store_mb(vcpu->mode, IN_GUEST_MODE);
+               if (ret <= 0 || need_new_vmid_gen(&vcpu->kvm->arch.vmid) ||
+                   kvm_request_pending(vcpu)) {
+                       vcpu->mode = OUTSIDE_GUEST_MODE;
+                       isb(); /* Ensure work in x_flush_hwstate is committed */
+                       kvm_pmu_sync_hwstate(vcpu);
+                       if (static_branch_unlikely(&userspace_irqchip_in_use))
+                               kvm_timer_sync_hwstate(vcpu);
+                       kvm_vgic_sync_hwstate(vcpu);
+                       local_irq_enable();
+                       preempt_enable();
+                       continue;
+               }
+               kvm_arm_setup_debug(vcpu);
+               /**************************************************************
+                * Enter the guest
+                */
+               trace_kvm_entry(*vcpu_pc(vcpu));
+               guest_enter_irqoff();
+               if (has_vhe()) {
+                       ret = kvm_vcpu_run_vhe(vcpu);
+               } else {
+                       ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
+               }
+               vcpu->mode = OUTSIDE_GUEST_MODE;
+               vcpu->stat.exits++;
+               /*
+                * Back from guest
+                *************************************************************/
+               kvm_arm_clear_debug(vcpu);
+               /*
+                * We must sync the PMU state before the vgic state so
+                * that the vgic can properly sample the updated state of the
+                * interrupt line.
+                */
+               kvm_pmu_sync_hwstate(vcpu);
+               /*
+                * Sync the vgic state before syncing the timer state because
+                * the timer code needs to know if the virtual timer
+                * interrupts are active.
+                */
+               kvm_vgic_sync_hwstate(vcpu);
+               /*
+                * Sync the timer hardware state before enabling interrupts as
+                * we don't want vtimer interrupts to race with syncing the
+                * timer virtual interrupt state.
+                */
+               if (static_branch_unlikely(&userspace_irqchip_in_use))
+                       kvm_timer_sync_hwstate(vcpu);
+               kvm_arch_vcpu_ctxsync_fp(vcpu);
+               /*
+                * We may have taken a host interrupt in HYP mode (ie
+                * while executing the guest). This interrupt is still
+                * pending, as we haven't serviced it yet!
+                *
+                * We're now back in SVC mode, with interrupts
+                * disabled.  Enabling the interrupts now will have
+                * the effect of taking the interrupt again, in SVC
+                * mode this time.
+                */
+               local_irq_enable();
+               /*
+                * We do local_irq_enable() before calling guest_exit() so
+                * that if a timer interrupt hits while running the guest we
+                * account that tick as being spent in the guest.  We enable
+                * preemption after calling guest_exit() so that if we get
+                * preempted, ticks after that point are not counted as
+                * guest time.
+                */
+               guest_exit();
+               trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
+               /* Exit types that need handling before we can be preempted */
+               handle_exit_early(vcpu, run, ret);
+               preempt_enable();
+               ret = handle_exit(vcpu, run, ret);
+       }
+       /* Tell userspace about in-kernel device output levels */
+       if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
+               kvm_timer_update_run(vcpu);
+               kvm_pmu_update_run(vcpu);
+       }
+       kvm_sigset_deactivate(vcpu);
+       vcpu_put(vcpu);
+       return ret;
+ }
+ static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
+ {
+       int bit_index;
+       bool set;
+       unsigned long *hcr;
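+       /*
+        * HCR_VI/HCR_VF are the virtual IRQ/FIQ bits of HCR_EL2; __ffs()
+        * turns the mask into a bit index for the atomic bitops below.
+        */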
+       if (number == KVM_ARM_IRQ_CPU_IRQ)
+               bit_index = __ffs(HCR_VI);
+       else /* KVM_ARM_IRQ_CPU_FIQ */
+               bit_index = __ffs(HCR_VF);
+       hcr = vcpu_hcr(vcpu);
+       if (level)
+               set = test_and_set_bit(bit_index, hcr);
+       else
+               set = test_and_clear_bit(bit_index, hcr);
+       /*
+        * If we didn't change anything, no need to wake up or kick other CPUs
+        */
+       if (set == level)
+               return 0;
+       /*
+        * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
+        * trigger a world-switch round on the running physical CPU to set the
+        * virtual IRQ/FIQ fields in the HCR appropriately.
+        */
+       kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+       kvm_vcpu_kick(vcpu);
+       return 0;
+ }
+ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+                         bool line_status)
+ {
+       u32 irq = irq_level->irq;
+       unsigned int irq_type, vcpu_idx, irq_num;
+       int nrcpus = atomic_read(&kvm->online_vcpus);
+       struct kvm_vcpu *vcpu = NULL;
+       bool level = irq_level->level;
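+       /*
+        * The irq field packs the interrupt type, target vcpu index and
+        * interrupt number using the KVM_ARM_IRQ_*_SHIFT/_MASK definitions
+        * from the uapi headers; unpack the individual fields below.
+        */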
+       irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
+       vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
+       vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
+       irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
+       trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
+       switch (irq_type) {
+       case KVM_ARM_IRQ_TYPE_CPU:
+               if (irqchip_in_kernel(kvm))
+                       return -ENXIO;
+               if (vcpu_idx >= nrcpus)
+                       return -EINVAL;
+               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+               if (!vcpu)
+                       return -EINVAL;
+               if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
+                       return -EINVAL;
+               return vcpu_interrupt_line(vcpu, irq_num, level);
+       case KVM_ARM_IRQ_TYPE_PPI:
+               if (!irqchip_in_kernel(kvm))
+                       return -ENXIO;
+               if (vcpu_idx >= nrcpus)
+                       return -EINVAL;
+               vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+               if (!vcpu)
+                       return -EINVAL;
+               if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
+                       return -EINVAL;
+               return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
+       case KVM_ARM_IRQ_TYPE_SPI:
+               if (!irqchip_in_kernel(kvm))
+                       return -ENXIO;
+               if (irq_num < VGIC_NR_PRIVATE_IRQS)
+                       return -EINVAL;
+               return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
+       }
+       return -EINVAL;
+ }
+ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu,
+                              const struct kvm_vcpu_init *init)
+ {
+       unsigned int i, ret;
+       int phys_target = kvm_target_cpu();
+       if (init->target != phys_target)
+               return -EINVAL;
+       /*
+        * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+        * use the same target.
+        */
+       if (vcpu->arch.target != -1 && vcpu->arch.target != init->target)
+               return -EINVAL;
+       /* -ENOENT for unknown features, -EINVAL for invalid combinations. */
+       for (i = 0; i < sizeof(init->features) * 8; i++) {
+               bool set = (init->features[i / 32] & (1 << (i % 32)));
+               if (set && i >= KVM_VCPU_MAX_FEATURES)
+                       return -ENOENT;
+               /*
+                * Secondary and subsequent calls to KVM_ARM_VCPU_INIT must
+                * use the same feature set.
+                */
+               if (vcpu->arch.target != -1 && i < KVM_VCPU_MAX_FEATURES &&
+                   test_bit(i, vcpu->arch.features) != set)
+                       return -EINVAL;
+               if (set)
+                       set_bit(i, vcpu->arch.features);
+       }
+       vcpu->arch.target = phys_target;
+       /* Now we know what it is, we can reset it. */
+       ret = kvm_reset_vcpu(vcpu);
+       if (ret) {
+               vcpu->arch.target = -1;
+               bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES);
+       }
+       return ret;
+ }
+ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
+                                        struct kvm_vcpu_init *init)
+ {
+       int ret;
+       ret = kvm_vcpu_set_target(vcpu, init);
+       if (ret)
+               return ret;
+       /*
+        * Ensure a rebooted VM will fault in RAM pages and detect if the
+        * guest MMU is turned off and flush the caches as needed.
+        *
+        * S2FWB enforces all memory accesses to RAM being cacheable, so
+        * the cache is always coherent.
+        */
+       if (vcpu->arch.has_run_once && !cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
+               stage2_unmap_vm(vcpu->kvm);
+       vcpu_reset_hcr(vcpu);
+       /*
+        * Handle the "start in power-off" case.
+        */
+       if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
+               vcpu_power_off(vcpu);
+       else
+               vcpu->arch.power_off = false;
+       return 0;
+ }
+ static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+ {
+       int ret = -ENXIO;
+       switch (attr->group) {
+       default:
+               ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
+               break;
+       }
+       return ret;
+ }
+ static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+ {
+       int ret = -ENXIO;
+       switch (attr->group) {
+       default:
+               ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
+               break;
+       }
+       return ret;
+ }
+ static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
+                                struct kvm_device_attr *attr)
+ {
+       int ret = -ENXIO;
+       switch (attr->group) {
+       default:
+               ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
+               break;
+       }
+       return ret;
+ }
+ static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
+                                  struct kvm_vcpu_events *events)
+ {
+       memset(events, 0, sizeof(*events));
+       return __kvm_arm_vcpu_get_events(vcpu, events);
+ }
+ static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
+                                  struct kvm_vcpu_events *events)
+ {
+       int i;
+       /* check whether the reserved field is zero */
+       for (i = 0; i < ARRAY_SIZE(events->reserved); i++)
+               if (events->reserved[i])
+                       return -EINVAL;
+       /* check whether the pad field is zero */
+       for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++)
+               if (events->exception.pad[i])
+                       return -EINVAL;
+       return __kvm_arm_vcpu_set_events(vcpu, events);
+ }
+ long kvm_arch_vcpu_ioctl(struct file *filp,
+                        unsigned int ioctl, unsigned long arg)
+ {
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       struct kvm_device_attr attr;
+       long r;
+       switch (ioctl) {
+       case KVM_ARM_VCPU_INIT: {
+               struct kvm_vcpu_init init;
+               r = -EFAULT;
+               if (copy_from_user(&init, argp, sizeof(init)))
+                       break;
+               r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, &init);
+               break;
+       }
+       case KVM_SET_ONE_REG:
+       case KVM_GET_ONE_REG: {
+               struct kvm_one_reg reg;
+               r = -ENOEXEC;
+               if (unlikely(!kvm_vcpu_initialized(vcpu)))
+                       break;
+               r = -EFAULT;
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       break;
+               if (ioctl == KVM_SET_ONE_REG)
+                       r = kvm_arm_set_reg(vcpu, &reg);
+               else
+                       r = kvm_arm_get_reg(vcpu, &reg);
+               break;
+       }
+       case KVM_GET_REG_LIST: {
+               struct kvm_reg_list __user *user_list = argp;
+               struct kvm_reg_list reg_list;
+               unsigned n;
+               r = -ENOEXEC;
+               if (unlikely(!kvm_vcpu_initialized(vcpu)))
+                       break;
+               r = -EPERM;
+               if (!kvm_arm_vcpu_is_finalized(vcpu))
+                       break;
+               r = -EFAULT;
+               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+                       break;
+               n = reg_list.n;
+               reg_list.n = kvm_arm_num_regs(vcpu);
+               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+                       break;
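+               /*
+                * Userspace typically calls this twice: once with n == 0 to
+                * learn the number of registers, then again with a buffer
+                * large enough to hold all of them.
+                */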
+               r = -E2BIG;
+               if (n < reg_list.n)
+                       break;
+               r = kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+               break;
+       }
+       case KVM_SET_DEVICE_ATTR: {
+               r = -EFAULT;
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       break;
+               r = kvm_arm_vcpu_set_attr(vcpu, &attr);
+               break;
+       }
+       case KVM_GET_DEVICE_ATTR: {
+               r = -EFAULT;
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       break;
+               r = kvm_arm_vcpu_get_attr(vcpu, &attr);
+               break;
+       }
+       case KVM_HAS_DEVICE_ATTR: {
+               r = -EFAULT;
+               if (copy_from_user(&attr, argp, sizeof(attr)))
+                       break;
+               r = kvm_arm_vcpu_has_attr(vcpu, &attr);
+               break;
+       }
+       case KVM_GET_VCPU_EVENTS: {
+               struct kvm_vcpu_events events;
+               if (kvm_arm_vcpu_get_events(vcpu, &events))
+                       return -EINVAL;
+               if (copy_to_user(argp, &events, sizeof(events)))
+                       return -EFAULT;
+               return 0;
+       }
+       case KVM_SET_VCPU_EVENTS: {
+               struct kvm_vcpu_events events;
+               if (copy_from_user(&events, argp, sizeof(events)))
+                       return -EFAULT;
+               return kvm_arm_vcpu_set_events(vcpu, &events);
+       }
+       case KVM_ARM_VCPU_FINALIZE: {
+               int what;
+               if (!kvm_vcpu_initialized(vcpu))
+                       return -ENOEXEC;
+               if (get_user(what, (const int __user *)argp))
+                       return -EFAULT;
+               return kvm_arm_vcpu_finalize(vcpu, what);
+       }
+       default:
+               r = -EINVAL;
+       }
+       return r;
+ }
+ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+ {
+ }
+ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+                                       struct kvm_memory_slot *memslot)
+ {
+       kvm_flush_remote_tlbs(kvm);
+ }
+ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
+                                       struct kvm_arm_device_addr *dev_addr)
+ {
+       unsigned long dev_id, type;
+       dev_id = (dev_addr->id & KVM_ARM_DEVICE_ID_MASK) >>
+               KVM_ARM_DEVICE_ID_SHIFT;
+       type = (dev_addr->id & KVM_ARM_DEVICE_TYPE_MASK) >>
+               KVM_ARM_DEVICE_TYPE_SHIFT;
+       switch (dev_id) {
+       case KVM_ARM_DEVICE_VGIC_V2:
+               if (!vgic_present)
+                       return -ENXIO;
+               return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
+       default:
+               return -ENODEV;
+       }
+ }
+ long kvm_arch_vm_ioctl(struct file *filp,
+                      unsigned int ioctl, unsigned long arg)
+ {
+       struct kvm *kvm = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       switch (ioctl) {
+       case KVM_CREATE_IRQCHIP: {
+               int ret;
+               if (!vgic_present)
+                       return -ENXIO;
+               mutex_lock(&kvm->lock);
+               ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
+               mutex_unlock(&kvm->lock);
+               return ret;
+       }
+       case KVM_ARM_SET_DEVICE_ADDR: {
+               struct kvm_arm_device_addr dev_addr;
+               if (copy_from_user(&dev_addr, argp, sizeof(dev_addr)))
+                       return -EFAULT;
+               return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr);
+       }
+       case KVM_ARM_PREFERRED_TARGET: {
+               int err;
+               struct kvm_vcpu_init init;
+               err = kvm_vcpu_preferred_target(&init);
+               if (err)
+                       return err;
+               if (copy_to_user(argp, &init, sizeof(init)))
+                       return -EFAULT;
+               return 0;
+       }
+       default:
+               return -EINVAL;
+       }
+ }
+ static void cpu_init_hyp_mode(void)
+ {
+       phys_addr_t pgd_ptr;
+       unsigned long hyp_stack_ptr;
+       unsigned long vector_ptr;
+       unsigned long tpidr_el2;
+       /* Switch from the HYP stub to our own HYP init vector */
+       __hyp_set_vectors(kvm_get_idmap_vector());
+       /*
+        * Calculate the raw per-cpu offset without a translation from the
+        * kernel's mapping to the linear mapping, and store it in tpidr_el2
+        * so that we can use adr_l to access per-cpu variables in EL2.
+        */
+       tpidr_el2 = ((unsigned long)this_cpu_ptr(&kvm_host_data) -
+                    (unsigned long)kvm_ksym_ref(kvm_host_data));
+       pgd_ptr = kvm_mmu_get_httbr();
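+       /* The HYP stack grows downwards; pass the top of the stack page. */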
+       hyp_stack_ptr = __this_cpu_read(kvm_arm_hyp_stack_page) + PAGE_SIZE;
+       vector_ptr = (unsigned long)kvm_get_hyp_vector();
+       /*
+        * Call initialization code, and switch to the full blown HYP code.
+        * If the cpucaps haven't been finalized yet, something has gone very
+        * wrong, and hyp will crash and burn when it uses any
+        * cpus_have_const_cap() wrapper.
+        */
+       BUG_ON(!system_capabilities_finalized());
+       __kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr, tpidr_el2);
+       /*
+        * Disabling SSBD on a non-VHE system requires us to enable SSBS
+        * at EL2.
+        */
+       if (this_cpu_has_cap(ARM64_SSBS) &&
+           arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
+               kvm_call_hyp(__kvm_enable_ssbs);
+       }
+ }
+ static void cpu_hyp_reset(void)
+ {
+       if (!is_kernel_in_hyp_mode())
+               __hyp_reset_vectors();
+ }
+ static void cpu_hyp_reinit(void)
+ {
+       kvm_init_host_cpu_context(&this_cpu_ptr(&kvm_host_data)->host_ctxt);
+       cpu_hyp_reset();
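+       /*
+        * With VHE the kernel already runs at EL2, so only the VHE timer
+        * setup is needed; otherwise set up the nVHE EL2 environment anew.
+        */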
+       if (is_kernel_in_hyp_mode())
+               kvm_timer_init_vhe();
+       else
+               cpu_init_hyp_mode();
+       kvm_arm_init_debug();
+       if (vgic_present)
+               kvm_vgic_init_cpu_hardware();
+ }
+ static void _kvm_arch_hardware_enable(void *discard)
+ {
+       if (!__this_cpu_read(kvm_arm_hardware_enabled)) {
+               cpu_hyp_reinit();
+               __this_cpu_write(kvm_arm_hardware_enabled, 1);
+       }
+ }
+ int kvm_arch_hardware_enable(void)
+ {
+       _kvm_arch_hardware_enable(NULL);
+       return 0;
+ }
+ static void _kvm_arch_hardware_disable(void *discard)
+ {
+       if (__this_cpu_read(kvm_arm_hardware_enabled)) {
+               cpu_hyp_reset();
+               __this_cpu_write(kvm_arm_hardware_enabled, 0);
+       }
+ }
+ void kvm_arch_hardware_disable(void)
+ {
+       _kvm_arch_hardware_disable(NULL);
+ }
+ #ifdef CONFIG_CPU_PM
+ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
+                                   unsigned long cmd,
+                                   void *v)
+ {
+       /*
+        * kvm_arm_hardware_enabled is left with its old value over
+        * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should
+        * re-enable hyp.
+        */
+       switch (cmd) {
+       case CPU_PM_ENTER:
+               if (__this_cpu_read(kvm_arm_hardware_enabled))
+                       /*
+                        * don't update kvm_arm_hardware_enabled here
+                        * so that the hardware will be re-enabled
+                        * when we resume. See below.
+                        */
+                       cpu_hyp_reset();
+               return NOTIFY_OK;
+       case CPU_PM_ENTER_FAILED:
+       case CPU_PM_EXIT:
+               if (__this_cpu_read(kvm_arm_hardware_enabled))
+                       /* The hardware was enabled before suspend. */
+                       cpu_hyp_reinit();
+               return NOTIFY_OK;
+       default:
+               return NOTIFY_DONE;
+       }
+ }
+ static struct notifier_block hyp_init_cpu_pm_nb = {
+       .notifier_call = hyp_init_cpu_pm_notifier,
+ };
+ static void __init hyp_cpu_pm_init(void)
+ {
+       cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
+ }
+ static void __init hyp_cpu_pm_exit(void)
+ {
+       cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb);
+ }
+ #else
+ static inline void hyp_cpu_pm_init(void)
+ {
+ }
+ static inline void hyp_cpu_pm_exit(void)
+ {
+ }
+ #endif
+ static int init_common_resources(void)
+ {
++      return kvm_set_ipa_limit();
+ }
+ static int init_subsystems(void)
+ {
+       int err = 0;
+       /*
+        * Enable hardware so that subsystem initialisation can access EL2.
+        */
+       on_each_cpu(_kvm_arch_hardware_enable, NULL, 1);
+       /*
+        * Register the CPU low-power (PM) notifier
+        */
+       hyp_cpu_pm_init();
+       /*
+        * Init HYP view of VGIC
+        */
+       err = kvm_vgic_hyp_init();
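+       /*
+        * -ENODEV/-ENXIO mean there is no usable in-kernel GIC; KVM can
+        * still run with a userspace irqchip, so treat those as non-fatal.
+        */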
+       switch (err) {
+       case 0:
+               vgic_present = true;
+               break;
+       case -ENODEV:
+       case -ENXIO:
+               vgic_present = false;
+               err = 0;
+               break;
+       default:
+               goto out;
+       }
+       /*
+        * Init HYP architected timer support
+        */
+       err = kvm_timer_hyp_init(vgic_present);
+       if (err)
+               goto out;
+       kvm_perf_init();
+       kvm_coproc_table_init();
+ out:
+       on_each_cpu(_kvm_arch_hardware_disable, NULL, 1);
+       return err;
+ }
+ static void teardown_hyp_mode(void)
+ {
+       int cpu;
+       free_hyp_pgds();
+       for_each_possible_cpu(cpu)
+               free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+ }
+ /**
+  * Initialize Hyp-mode on all online CPUs.
+  */
+ static int init_hyp_mode(void)
+ {
+       int cpu;
+       int err = 0;
+       /*
+        * Allocate Hyp PGD and setup Hyp identity mapping
+        */
+       err = kvm_mmu_init();
+       if (err)
+               goto out_err;
+       /*
+        * Allocate stack pages for Hypervisor-mode
+        */
+       for_each_possible_cpu(cpu) {
+               unsigned long stack_page;
+               stack_page = __get_free_page(GFP_KERNEL);
+               if (!stack_page) {
+                       err = -ENOMEM;
+                       goto out_err;
+               }
+               per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
+       }
+       /*
+        * Map the Hyp-code called directly from the host
+        */
+       err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
+                                 kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
+       if (err) {
+               kvm_err("Cannot map world-switch code\n");
+               goto out_err;
+       }
+       err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
+                                 kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
+       if (err) {
+               kvm_err("Cannot map rodata section\n");
+               goto out_err;
+       }
+       err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
+                                 kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
+       if (err) {
+               kvm_err("Cannot map bss section\n");
+               goto out_err;
+       }
+       err = kvm_map_vectors();
+       if (err) {
+               kvm_err("Cannot map vectors\n");
+               goto out_err;
+       }
+       /*
+        * Map the Hyp stack pages
+        */
+       for_each_possible_cpu(cpu) {
+               char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
+               err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
+                                         PAGE_HYP);
+               if (err) {
+                       kvm_err("Cannot map hyp stack\n");
+                       goto out_err;
+               }
+       }
+       for_each_possible_cpu(cpu) {
+               kvm_host_data_t *cpu_data;
+               cpu_data = per_cpu_ptr(&kvm_host_data, cpu);
+               err = create_hyp_mappings(cpu_data, cpu_data + 1, PAGE_HYP);
+               if (err) {
+                       kvm_err("Cannot map host CPU state: %d\n", err);
+                       goto out_err;
+               }
+       }
+       err = hyp_map_aux_data();
+       if (err)
+               kvm_err("Cannot map host auxiliary data: %d\n", err);
+       return 0;
+ out_err:
+       teardown_hyp_mode();
+       kvm_err("error initializing Hyp mode: %d\n", err);
+       return err;
+ }
+ static void check_kvm_target_cpu(void *ret)
+ {
+       *(int *)ret = kvm_target_cpu();
+ }
+ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
+ {
+       struct kvm_vcpu *vcpu;
+       int i;
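+       /* Compare affinity fields only; mask off the non-affinity MPIDR bits. */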
+       mpidr &= MPIDR_HWID_BITMASK;
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
+                       return vcpu;
+       }
+       return NULL;
+ }
+ bool kvm_arch_has_irq_bypass(void)
+ {
+       return true;
+ }
+ int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+                                     struct irq_bypass_producer *prod)
+ {
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+       return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
+                                         &irqfd->irq_entry);
+ }
+ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+                                     struct irq_bypass_producer *prod)
+ {
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+       kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
+                                    &irqfd->irq_entry);
+ }
+ void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
+ {
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+       kvm_arm_halt_guest(irqfd->kvm);
+ }
+ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
+ {
+       struct kvm_kernel_irqfd *irqfd =
+               container_of(cons, struct kvm_kernel_irqfd, consumer);
+       kvm_arm_resume_guest(irqfd->kvm);
+ }
+ /**
+  * Initialize Hyp-mode and memory mappings on all CPUs.
+  */
+ int kvm_arch_init(void *opaque)
+ {
+       int err;
+       int ret, cpu;
+       bool in_hyp_mode;
+       if (!is_hyp_mode_available()) {
+               kvm_info("HYP mode not available\n");
+               return -ENODEV;
+       }
+       in_hyp_mode = is_kernel_in_hyp_mode();
+       if (!in_hyp_mode && kvm_arch_requires_vhe()) {
+               kvm_pr_unimpl("CPU unsupported in non-VHE mode, not initializing\n");
+               return -ENODEV;
+       }
+       for_each_online_cpu(cpu) {
+               smp_call_function_single(cpu, check_kvm_target_cpu, &ret, 1);
+               if (ret < 0) {
+                       kvm_err("Error, CPU %d not supported!\n", cpu);
+                       return -ENODEV;
+               }
+       }
+       err = init_common_resources();
+       if (err)
+               return err;
+       err = kvm_arm_init_sve();
+       if (err)
+               return err;
+       if (!in_hyp_mode) {
+               err = init_hyp_mode();
+               if (err)
+                       goto out_err;
+       }
+       err = init_subsystems();
+       if (err)
+               goto out_hyp;
+       if (in_hyp_mode)
+               kvm_info("VHE mode initialized successfully\n");
+       else
+               kvm_info("Hyp mode initialized successfully\n");
+       return 0;
+ out_hyp:
+       hyp_cpu_pm_exit();
+       if (!in_hyp_mode)
+               teardown_hyp_mode();
+ out_err:
+       return err;
+ }
+ /* NOP: Compiling as a module not supported */
+ void kvm_arch_exit(void)
+ {
+       kvm_perf_teardown();
+ }
+ static int arm_init(void)
+ {
+       int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+       return rc;
+ }
+ module_init(arm_init);
diff --combined arch/arm64/kvm/hyp/switch.c
index 1336e6f0acdf1b84d8375ef131f29212a75b7c7e,c07a45643cd41e2920b2043a66e91ebf133762dc..676b6585e5ae406ee9766e49047f8304bcb05037
@@@ -138,7 -138,7 +138,7 @@@ static void __hyp_text __activate_traps
  
        write_sysreg(val, cptr_el2);
  
 -      if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 +      if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
  
                isb();
@@@ -181,7 -181,7 +181,7 @@@ static void deactivate_traps_vhe(void
         * above before we can switch to the EL2/EL0 translation regime used by
         * the host.
         */
 -      asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT_VHE));
 +      asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
  
        write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
@@@ -192,7 -192,7 +192,7 @@@ static void __hyp_text __deactivate_tra
  {
        u64 mdcr_el2 = read_sysreg(mdcr_el2);
  
 -      if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT_NVHE)) {
 +      if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
                u64 val;
  
                /*
@@@ -270,8 -270,8 +270,8 @@@ static void __hyp_text __deactivate_vm(
  static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
  {
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
-               __vgic_v3_save_state(vcpu);
-               __vgic_v3_deactivate_traps(vcpu);
+               __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
+               __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
        }
  }
  
  static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
  {
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
-               __vgic_v3_activate_traps(vcpu);
-               __vgic_v3_restore_state(vcpu);
+               __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
+               __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
        }
  }
  
diff --combined arch/arm64/kvm/reset.c
index 70cd7bcca4332537f053582097800284d1346134,865c8aa670bc2b9065e6fd4c962ee57bc48eea70..d3b2090237274f8ffe8f4d89de92074390d4075c
@@@ -36,16 -36,20 +36,12 @@@ static u32 kvm_ipa_limit
  /*
   * ARMv8 Reset Values
   */
- static const struct kvm_regs default_regs_reset = {
-       .regs.pstate = (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT |
-                       PSR_F_BIT | PSR_D_BIT),
- };
+ #define VCPU_RESET_PSTATE_EL1 (PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
+                                PSR_F_BIT | PSR_D_BIT)
  
- static const struct kvm_regs default_regs_reset32 = {
-       .regs.pstate = (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT |
-                       PSR_AA32_I_BIT | PSR_AA32_F_BIT),
- };
+ #define VCPU_RESET_PSTATE_SVC (PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
+                                PSR_AA32_I_BIT | PSR_AA32_F_BIT)
  
 -static bool cpu_has_32bit_el1(void)
 -{
 -      u64 pfr0;
 -
 -      pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
 -      return !!(pfr0 & 0x20);
 -}
 -
  /**
   * kvm_arch_vm_ioctl_check_extension
   *
@@@ -58,7 -62,7 +54,7 @@@ int kvm_arch_vm_ioctl_check_extension(s
  
        switch (ext) {
        case KVM_CAP_ARM_EL1_32BIT:
 -              r = cpu_has_32bit_el1();
 +              r = cpus_have_const_cap(ARM64_HAS_32BIT_EL1);
                break;
        case KVM_CAP_GUEST_DEBUG_HW_BPS:
                r = get_num_brps();
@@@ -155,7 -159,7 +151,7 @@@ static int kvm_vcpu_finalize_sve(struc
        vl = vcpu->arch.sve_max_vl;
  
        /*
-        * Resposibility for these properties is shared between
+        * Responsibility for these properties is shared between
         * kvm_arm_init_arch_resources(), kvm_vcpu_enable_sve() and
         * set_sve_vls().  Double-check here just to be sure:
         */
@@@ -241,7 -245,7 +237,7 @@@ static int kvm_vcpu_enable_ptrauth(stru
   * ioctl or as part of handling a request issued by another VCPU in the PSCI
   * handling code.  In the first case, the VCPU will not be loaded, and in the
   * second case the VCPU will be loaded.  Because this function operates purely
-  * on the memory-backed valus of system registers, we want to do a full put if
+  * on the memory-backed values of system registers, we want to do a full put if
   * we were loaded (handling a request) and load the values back at the end of
   * the function.  Otherwise we leave the state alone.  In both cases, we
   * disable preemption around the vcpu reset as we would otherwise race with
   */
  int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
  {
-       const struct kvm_regs *cpu_reset;
        int ret = -EINVAL;
        bool loaded;
+       u32 pstate;
  
        /* Reset PMU outside of the non-preemptible section */
        kvm_pmu_vcpu_reset(vcpu);
        switch (vcpu->arch.target) {
        default:
                if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) {
 -                      if (!cpu_has_32bit_el1())
 +                      if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1))
                                goto out;
-                       cpu_reset = &default_regs_reset32;
+                       pstate = VCPU_RESET_PSTATE_SVC;
                } else {
-                       cpu_reset = &default_regs_reset;
+                       pstate = VCPU_RESET_PSTATE_EL1;
                }
  
                break;
        }
  
        /* Reset core registers */
-       memcpy(vcpu_gp_regs(vcpu), cpu_reset, sizeof(*cpu_reset));
+       memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
+       vcpu_gp_regs(vcpu)->regs.pstate = pstate;
  
        /* Reset system registers */
        kvm_reset_sys_regs(vcpu);
@@@ -332,50 -337,11 +329,50 @@@ out
        return ret;
  }
  
 -void kvm_set_ipa_limit(void)
 +u32 get_kvm_ipa_limit(void)
 +{
 +      return kvm_ipa_limit;
 +}
 +
 +int kvm_set_ipa_limit(void)
  {
 -      unsigned int ipa_max, pa_max, va_max, parange;
 +      unsigned int ipa_max, pa_max, va_max, parange, tgran_2;
 +      u64 mmfr0;
 +
 +      mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 +      parange = cpuid_feature_extract_unsigned_field(mmfr0,
 +                              ID_AA64MMFR0_PARANGE_SHIFT);
 +
 +      /*
 +       * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
 +       * Stage-2. If not, things will stop very quickly.
 +       */
 +      switch (PAGE_SIZE) {
 +      default:
 +      case SZ_4K:
 +              tgran_2 = ID_AA64MMFR0_TGRAN4_2_SHIFT;
 +              break;
 +      case SZ_16K:
 +              tgran_2 = ID_AA64MMFR0_TGRAN16_2_SHIFT;
 +              break;
 +      case SZ_64K:
 +              tgran_2 = ID_AA64MMFR0_TGRAN64_2_SHIFT;
 +              break;
 +      }
 +
 +      switch (cpuid_feature_extract_unsigned_field(mmfr0, tgran_2)) {
 +      default:
 +      case 1:
 +              kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
 +              return -EINVAL;
 +      case 0:
 +              kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
 +              break;
 +      case 2:
 +              kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
 +              break;
 +      }
  
 -      parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 0x7;
        pa_max = id_aa64mmfr0_parange_to_phys_shift(parange);
  
        /* Clamp the IPA limit to the PA size supported by the kernel */
         *
         * So clamp the ipa limit further down to limit the number of levels.
         * Since we can concatenate upto 16 tables at entry level, we could
-        * go upto 4bits above the maximum VA addressible with the current
+        * go upto 4bits above the maximum VA addressable with the current
         * number of levels.
         */
        va_max = PGDIR_SHIFT + PAGE_SHIFT - 3;
             "KVM IPA limit (%d bit) is smaller than default size\n", ipa_max);
        kvm_ipa_limit = ipa_max;
        kvm_info("IPA Size Limit: %dbits\n", kvm_ipa_limit);
 +
 +      return 0;
  }
  
  /*
   */
  int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
  {
 -      u64 vtcr = VTCR_EL2_FLAGS;
 +      u64 vtcr = VTCR_EL2_FLAGS, mmfr0;
        u32 parange, phys_shift;
        u8 lvls;
  
                phys_shift = KVM_PHYS_SHIFT;
        }
  
 -      parange = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1) & 7;
 +      mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
 +      parange = cpuid_feature_extract_unsigned_field(mmfr0,
 +                              ID_AA64MMFR0_PARANGE_SHIFT);
        if (parange > ID_AA64MMFR0_PARANGE_MAX)
                parange = ID_AA64MMFR0_PARANGE_MAX;
        vtcr |= parange << VTCR_EL2_PS_SHIFT;
diff --combined arch/arm64/kvm/sys_regs.c
index 7d7a39b01135c42cbbccbdab1dbd3a0146584b6f,ad1d57501d6d8e7fc20fbf1d7cee241736c8da51..80985439bfb24d4adb006b49d855e08cdb4ecbc4
@@@ -34,7 -34,7 +34,7 @@@
  #include "trace.h"
  
  /*
-  * All of this file is extremly similar to the ARM coproc.c, but the
+  * All of this file is extremely similar to the ARM coproc.c, but the
   * types are different. My gut feeling is that it should be pretty
   * easy to merge, but that would be an ABI breakage -- again. VFP
   * would also need to be abstracted.
@@@ -64,11 -64,8 +64,8 @@@ static bool write_to_read_only(struct k
        return false;
  }
  
- u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
+ static bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
  {
-       if (!vcpu->arch.sysregs_loaded_on_cpu)
-               goto immediate_read;
        /*
         * System registers listed in the switch are not saved on every
         * exit from the guest but are only saved on vcpu_put.
         * thread when emulating cross-VCPU communication.
         */
        switch (reg) {
-       case CSSELR_EL1:        return read_sysreg_s(SYS_CSSELR_EL1);
-       case SCTLR_EL1:         return read_sysreg_s(SYS_SCTLR_EL12);
-       case ACTLR_EL1:         return read_sysreg_s(SYS_ACTLR_EL1);
-       case CPACR_EL1:         return read_sysreg_s(SYS_CPACR_EL12);
-       case TTBR0_EL1:         return read_sysreg_s(SYS_TTBR0_EL12);
-       case TTBR1_EL1:         return read_sysreg_s(SYS_TTBR1_EL12);
-       case TCR_EL1:           return read_sysreg_s(SYS_TCR_EL12);
-       case ESR_EL1:           return read_sysreg_s(SYS_ESR_EL12);
-       case AFSR0_EL1:         return read_sysreg_s(SYS_AFSR0_EL12);
-       case AFSR1_EL1:         return read_sysreg_s(SYS_AFSR1_EL12);
-       case FAR_EL1:           return read_sysreg_s(SYS_FAR_EL12);
-       case MAIR_EL1:          return read_sysreg_s(SYS_MAIR_EL12);
-       case VBAR_EL1:          return read_sysreg_s(SYS_VBAR_EL12);
-       case CONTEXTIDR_EL1:    return read_sysreg_s(SYS_CONTEXTIDR_EL12);
-       case TPIDR_EL0:         return read_sysreg_s(SYS_TPIDR_EL0);
-       case TPIDRRO_EL0:       return read_sysreg_s(SYS_TPIDRRO_EL0);
-       case TPIDR_EL1:         return read_sysreg_s(SYS_TPIDR_EL1);
-       case AMAIR_EL1:         return read_sysreg_s(SYS_AMAIR_EL12);
-       case CNTKCTL_EL1:       return read_sysreg_s(SYS_CNTKCTL_EL12);
-       case PAR_EL1:           return read_sysreg_s(SYS_PAR_EL1);
-       case DACR32_EL2:        return read_sysreg_s(SYS_DACR32_EL2);
-       case IFSR32_EL2:        return read_sysreg_s(SYS_IFSR32_EL2);
-       case DBGVCR32_EL2:      return read_sysreg_s(SYS_DBGVCR32_EL2);
+       case CSSELR_EL1:        *val = read_sysreg_s(SYS_CSSELR_EL1);   break;
+       case SCTLR_EL1:         *val = read_sysreg_s(SYS_SCTLR_EL12);   break;
+       case ACTLR_EL1:         *val = read_sysreg_s(SYS_ACTLR_EL1);    break;
+       case CPACR_EL1:         *val = read_sysreg_s(SYS_CPACR_EL12);   break;
+       case TTBR0_EL1:         *val = read_sysreg_s(SYS_TTBR0_EL12);   break;
+       case TTBR1_EL1:         *val = read_sysreg_s(SYS_TTBR1_EL12);   break;
+       case TCR_EL1:           *val = read_sysreg_s(SYS_TCR_EL12);     break;
+       case ESR_EL1:           *val = read_sysreg_s(SYS_ESR_EL12);     break;
+       case AFSR0_EL1:         *val = read_sysreg_s(SYS_AFSR0_EL12);   break;
+       case AFSR1_EL1:         *val = read_sysreg_s(SYS_AFSR1_EL12);   break;
+       case FAR_EL1:           *val = read_sysreg_s(SYS_FAR_EL12);     break;
+       case MAIR_EL1:          *val = read_sysreg_s(SYS_MAIR_EL12);    break;
+       case VBAR_EL1:          *val = read_sysreg_s(SYS_VBAR_EL12);    break;
+       case CONTEXTIDR_EL1:    *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
+       case TPIDR_EL0:         *val = read_sysreg_s(SYS_TPIDR_EL0);    break;
+       case TPIDRRO_EL0:       *val = read_sysreg_s(SYS_TPIDRRO_EL0);  break;
+       case TPIDR_EL1:         *val = read_sysreg_s(SYS_TPIDR_EL1);    break;
+       case AMAIR_EL1:         *val = read_sysreg_s(SYS_AMAIR_EL12);   break;
+       case CNTKCTL_EL1:       *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
+       case PAR_EL1:           *val = read_sysreg_s(SYS_PAR_EL1);      break;
+       case DACR32_EL2:        *val = read_sysreg_s(SYS_DACR32_EL2);   break;
+       case IFSR32_EL2:        *val = read_sysreg_s(SYS_IFSR32_EL2);   break;
+       case DBGVCR32_EL2:      *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
+       default:                return false;
        }
  
- immediate_read:
-       return __vcpu_sys_reg(vcpu, reg);
+       return true;
  }
  
- void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
+ static bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
  {
-       if (!vcpu->arch.sysregs_loaded_on_cpu)
-               goto immediate_write;
        /*
         * System registers listed in the switch are not restored on every
         * entry to the guest but are only restored on vcpu_load.
         *
         * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
-        * should never be listed below, because the the MPIDR should only be
-        * set once, before running the VCPU, and never changed later.
+        * should never be listed below, because the MPIDR should only be set
+        * once, before running the VCPU, and never changed later.
         */
        switch (reg) {
-       case CSSELR_EL1:        write_sysreg_s(val, SYS_CSSELR_EL1);    return;
-       case SCTLR_EL1:         write_sysreg_s(val, SYS_SCTLR_EL12);    return;
-       case ACTLR_EL1:         write_sysreg_s(val, SYS_ACTLR_EL1);     return;
-       case CPACR_EL1:         write_sysreg_s(val, SYS_CPACR_EL12);    return;
-       case TTBR0_EL1:         write_sysreg_s(val, SYS_TTBR0_EL12);    return;
-       case TTBR1_EL1:         write_sysreg_s(val, SYS_TTBR1_EL12);    return;
-       case TCR_EL1:           write_sysreg_s(val, SYS_TCR_EL12);      return;
-       case ESR_EL1:           write_sysreg_s(val, SYS_ESR_EL12);      return;
-       case AFSR0_EL1:         write_sysreg_s(val, SYS_AFSR0_EL12);    return;
-       case AFSR1_EL1:         write_sysreg_s(val, SYS_AFSR1_EL12);    return;
-       case FAR_EL1:           write_sysreg_s(val, SYS_FAR_EL12);      return;
-       case MAIR_EL1:          write_sysreg_s(val, SYS_MAIR_EL12);     return;
-       case VBAR_EL1:          write_sysreg_s(val, SYS_VBAR_EL12);     return;
-       case CONTEXTIDR_EL1:    write_sysreg_s(val, SYS_CONTEXTIDR_EL12); return;
-       case TPIDR_EL0:         write_sysreg_s(val, SYS_TPIDR_EL0);     return;
-       case TPIDRRO_EL0:       write_sysreg_s(val, SYS_TPIDRRO_EL0);   return;
-       case TPIDR_EL1:         write_sysreg_s(val, SYS_TPIDR_EL1);     return;
-       case AMAIR_EL1:         write_sysreg_s(val, SYS_AMAIR_EL12);    return;
-       case CNTKCTL_EL1:       write_sysreg_s(val, SYS_CNTKCTL_EL12);  return;
-       case PAR_EL1:           write_sysreg_s(val, SYS_PAR_EL1);       return;
-       case DACR32_EL2:        write_sysreg_s(val, SYS_DACR32_EL2);    return;
-       case IFSR32_EL2:        write_sysreg_s(val, SYS_IFSR32_EL2);    return;
-       case DBGVCR32_EL2:      write_sysreg_s(val, SYS_DBGVCR32_EL2);  return;
+       case CSSELR_EL1:        write_sysreg_s(val, SYS_CSSELR_EL1);    break;
+       case SCTLR_EL1:         write_sysreg_s(val, SYS_SCTLR_EL12);    break;
+       case ACTLR_EL1:         write_sysreg_s(val, SYS_ACTLR_EL1);     break;
+       case CPACR_EL1:         write_sysreg_s(val, SYS_CPACR_EL12);    break;
+       case TTBR0_EL1:         write_sysreg_s(val, SYS_TTBR0_EL12);    break;
+       case TTBR1_EL1:         write_sysreg_s(val, SYS_TTBR1_EL12);    break;
+       case TCR_EL1:           write_sysreg_s(val, SYS_TCR_EL12);      break;
+       case ESR_EL1:           write_sysreg_s(val, SYS_ESR_EL12);      break;
+       case AFSR0_EL1:         write_sysreg_s(val, SYS_AFSR0_EL12);    break;
+       case AFSR1_EL1:         write_sysreg_s(val, SYS_AFSR1_EL12);    break;
+       case FAR_EL1:           write_sysreg_s(val, SYS_FAR_EL12);      break;
+       case MAIR_EL1:          write_sysreg_s(val, SYS_MAIR_EL12);     break;
+       case VBAR_EL1:          write_sysreg_s(val, SYS_VBAR_EL12);     break;
+       case CONTEXTIDR_EL1:    write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
+       case TPIDR_EL0:         write_sysreg_s(val, SYS_TPIDR_EL0);     break;
+       case TPIDRRO_EL0:       write_sysreg_s(val, SYS_TPIDRRO_EL0);   break;
+       case TPIDR_EL1:         write_sysreg_s(val, SYS_TPIDR_EL1);     break;
+       case AMAIR_EL1:         write_sysreg_s(val, SYS_AMAIR_EL12);    break;
+       case CNTKCTL_EL1:       write_sysreg_s(val, SYS_CNTKCTL_EL12);  break;
+       case PAR_EL1:           write_sysreg_s(val, SYS_PAR_EL1);       break;
+       case DACR32_EL2:        write_sysreg_s(val, SYS_DACR32_EL2);    break;
+       case IFSR32_EL2:        write_sysreg_s(val, SYS_IFSR32_EL2);    break;
+       case DBGVCR32_EL2:      write_sysreg_s(val, SYS_DBGVCR32_EL2);  break;
+       default:                return false;
        }
  
- immediate_write:
+       return true;
+ }
+ u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
+ {
+       u64 val = 0x8badf00d8badf00d;
+       if (vcpu->arch.sysregs_loaded_on_cpu &&
+           __vcpu_read_sys_reg_from_cpu(reg, &val))
+               return val;
+       return __vcpu_sys_reg(vcpu, reg);
+ }
+ void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
+ {
+       if (vcpu->arch.sysregs_loaded_on_cpu &&
+           __vcpu_write_sys_reg_to_cpu(val, reg))
+               return;
         __vcpu_sys_reg(vcpu, reg) = val;
  }
  
@@@ -1456,9 -1470,9 +1470,9 @@@ static const struct sys_reg_desc sys_re
        ID_SANITISED(MVFR1_EL1),
        ID_SANITISED(MVFR2_EL1),
        ID_UNALLOCATED(3,3),
 -      ID_UNALLOCATED(3,4),
 -      ID_UNALLOCATED(3,5),
 -      ID_UNALLOCATED(3,6),
 +      ID_SANITISED(ID_PFR2_EL1),
 +      ID_HIDDEN(ID_DFR1_EL1),
 +      ID_SANITISED(ID_MMFR5_EL1),
        ID_UNALLOCATED(3,7),
  
        /* AArch64 ID registers */
        { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
  
        { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
-       { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
+       { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
  
        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
  
        { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
        { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
-       { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
-       { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
+       { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
+       { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },
        { SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
        { SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
        { SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
@@@ -2073,12 -2087,37 +2087,37 @@@ static const struct sys_reg_desc cp15_6
        { SYS_DESC(SYS_AARCH32_CNTP_CVAL),    access_arch_timer },
  };
  
+ static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
+                             bool is_32)
+ {
+       unsigned int i;
+       for (i = 0; i < n; i++) {
+               if (!is_32 && table[i].reg && !table[i].reset) {
+                       kvm_err("sys_reg table %p entry %d lacks reset\n",
+                               table, i);
+                       return 1;
+               }
+               if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
+                       kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
+                       return 1;
+               }
+       }
+       return 0;
+ }
  /* Target specific emulation tables */
  static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];
  
  void kvm_register_target_sys_reg_table(unsigned int target,
                                       struct kvm_sys_reg_target_table *table)
  {
+       if (check_sysreg_table(table->table64.table, table->table64.num, false) ||
+           check_sysreg_table(table->table32.table, table->table32.num, true))
+               return;
        target_tables[target] = table;
  }
  
@@@ -2364,19 -2403,13 +2403,13 @@@ static int emulate_sys_reg(struct kvm_v
  }
  
  static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
-                               const struct sys_reg_desc *table, size_t num,
-                               unsigned long *bmap)
+                               const struct sys_reg_desc *table, size_t num)
  {
        unsigned long i;
  
        for (i = 0; i < num; i++)
-               if (table[i].reset) {
-                       int reg = table[i].reg;
+               if (table[i].reset)
                        table[i].reset(vcpu, &table[i]);
-                       if (reg > 0 && reg < NR_SYS_REGS)
-                               set_bit(reg, bmap);
-               }
  }
  
  /**
@@@ -2832,32 -2865,18 +2865,18 @@@ int kvm_arm_copy_sys_reg_indices(struc
        return write_demux_regids(uindices);
  }
  
- static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
- {
-       unsigned int i;
-       for (i = 1; i < n; i++) {
-               if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
-                       kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
-                       return 1;
-               }
-       }
-       return 0;
- }
  void kvm_sys_reg_table_init(void)
  {
        unsigned int i;
        struct sys_reg_desc clidr;
  
        /* Make sure tables are unique and in order. */
-       BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
-       BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
-       BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
-       BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
-       BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
-       BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));
+       BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false));
+       BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true));
+       BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true));
+       BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true));
+       BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true));
+       BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false));
  
        /* We abuse the reset function to overwrite the table itself. */
        for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
@@@ -2893,17 -2912,10 +2912,10 @@@ void kvm_reset_sys_regs(struct kvm_vcp
  {
        size_t num;
        const struct sys_reg_desc *table;
-       DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
  
        /* Generic chip reset first (so target could override). */
-       reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
+       reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
  
        table = get_target_table(vcpu->arch.target, true, &num);
-       reset_sys_reg_descs(vcpu, table, num, bmap);
-       for (num = 1; num < NR_SYS_REGS; num++) {
-               if (WARN(!test_bit(num, bmap),
-                        "Didn't reset __vcpu_sys_reg(%zi)\n", num))
-                       break;
-       }
+       reset_sys_reg_descs(vcpu, table, num);
  }
index 0000000000000000000000000000000000000000,89a14ec8b33bb2009786d921759868943a152e24..d2339a2b9fb9c5a6ebdc9f6c48d399f8c49423bb
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,1063 +1,1063 @@@
 -       * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.txt
+ // SPDX-License-Identifier: GPL-2.0-only
+ /*
+  * VGICv3 MMIO handling functions
+  */
+ #include <linux/bitfield.h>
+ #include <linux/irqchip/arm-gic-v3.h>
+ #include <linux/kvm.h>
+ #include <linux/kvm_host.h>
+ #include <linux/interrupt.h>
+ #include <kvm/iodev.h>
+ #include <kvm/arm_vgic.h>
+ #include <asm/kvm_emulate.h>
+ #include <asm/kvm_arm.h>
+ #include <asm/kvm_mmu.h>
+ #include "vgic.h"
+ #include "vgic-mmio.h"
+ /* extract @num bytes at @offset bytes offset in data */
+ unsigned long extract_bytes(u64 data, unsigned int offset,
+                           unsigned int num)
+ {
+       return (data >> (offset * 8)) & GENMASK_ULL(num * 8 - 1, 0);
+ }
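
To see what extract_bytes() above computes, here is a minimal standalone sketch in plain userspace C; it is not part of the patch, the helper is re-declared locally, and MASK_ULL stands in for the kernel's GENMASK_ULL():

    #include <stdint.h>
    #include <stdio.h>

    /* local stand-in for the kernel's GENMASK_ULL(h, l) */
    #define MASK_ULL(h, l)  (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

    /* mirrors extract_bytes(): pull @num bytes starting at byte @offset */
    static unsigned long extract_bytes(uint64_t data, unsigned int offset,
                                       unsigned int num)
    {
            return (data >> (offset * 8)) & MASK_ULL(num * 8 - 1, 0);
    }

    int main(void)
    {
            uint64_t reg = 0x1122334455667788ULL;

            /* bytes 4..5 of the register: prints 3344 */
            printf("%lx\n", extract_bytes(reg, 4, 2));
            return 0;
    }
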
+ /* allows updates of any half of a 64-bit register (or the whole thing) */
+ u64 update_64bit_reg(u64 reg, unsigned int offset, unsigned int len,
+                    unsigned long val)
+ {
+       int lower = (offset & 4) * 8;
+       int upper = lower + 8 * len - 1;
+       reg &= ~GENMASK_ULL(upper, lower);
+       val &= GENMASK_ULL(len * 8 - 1, 0);
+       return reg | ((u64)val << lower);
+ }
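
update_64bit_reg() is the write-side counterpart: a 32-bit (or full 64-bit) MMIO write is merged into the selected half of a 64-bit register. A standalone sketch with made-up values, again using MASK_ULL in place of the kernel's GENMASK_ULL():

    #include <stdint.h>
    #include <stdio.h>

    #define MASK_ULL(h, l)  (((~0ULL) >> (63 - (h))) & ((~0ULL) << (l)))

    /* mirrors update_64bit_reg(): @offset selects the half, @len is 4 or 8 bytes */
    static uint64_t update_64bit_reg(uint64_t reg, unsigned int offset,
                                     unsigned int len, unsigned long val)
    {
            int lower = (offset & 4) * 8;
            int upper = lower + 8 * len - 1;

            reg &= ~MASK_ULL(upper, lower);
            val &= MASK_ULL(len * 8 - 1, 0);
            return reg | ((uint64_t)val << lower);
    }

    int main(void)
    {
            uint64_t reg = 0xaaaaaaaabbbbbbbbULL;

            /* 32-bit write to the upper word (offset 4) leaves the lower word alone */
            printf("%llx\n",
                   (unsigned long long)update_64bit_reg(reg, 4, 4, 0x11223344));
            /* prints 11223344bbbbbbbb */
            return 0;
    }
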
+ bool vgic_has_its(struct kvm *kvm)
+ {
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       if (dist->vgic_model != KVM_DEV_TYPE_ARM_VGIC_V3)
+               return false;
+       return dist->has_its;
+ }
+ bool vgic_supports_direct_msis(struct kvm *kvm)
+ {
+       return (kvm_vgic_global_state.has_gicv4_1 ||
+               (kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm)));
+ }
+ /*
+  * The Revision field in the IIDR has the following meanings:
+  *
+  * Revision 2: Interrupt groups are guest-configurable and signaled using
+  *           their configured groups.
+  */
+ static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
+                                           gpa_t addr, unsigned int len)
+ {
+       struct vgic_dist *vgic = &vcpu->kvm->arch.vgic;
+       u32 value = 0;
+       switch (addr & 0x0c) {
+       case GICD_CTLR:
+               if (vgic->enabled)
+                       value |= GICD_CTLR_ENABLE_SS_G1;
+               value |= GICD_CTLR_ARE_NS | GICD_CTLR_DS;
+               if (vgic->nassgireq)
+                       value |= GICD_CTLR_nASSGIreq;
+               break;
+       case GICD_TYPER:
+               value = vgic->nr_spis + VGIC_NR_PRIVATE_IRQS;
+               value = (value >> 5) - 1;
+               if (vgic_has_its(vcpu->kvm)) {
+                       value |= (INTERRUPT_ID_BITS_ITS - 1) << 19;
+                       value |= GICD_TYPER_LPIS;
+               } else {
+                       value |= (INTERRUPT_ID_BITS_SPIS - 1) << 19;
+               }
+               break;
+       case GICD_TYPER2:
+               if (kvm_vgic_global_state.has_gicv4_1)
+                       value = GICD_TYPER2_nASSGIcap;
+               break;
+       case GICD_IIDR:
+               value = (PRODUCT_ID_KVM << GICD_IIDR_PRODUCT_ID_SHIFT) |
+                       (vgic->implementation_rev << GICD_IIDR_REVISION_SHIFT) |
+                       (IMPLEMENTER_ARM << GICD_IIDR_IMPLEMENTER_SHIFT);
+               break;
+       default:
+               return 0;
+       }
+       return value;
+ }
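
The GICD_TYPER case in vgic_mmio_read_v3_misc() above is easier to follow with concrete numbers. Per the GICv3 architecture, ITLinesNumber N means 32 * (N + 1) interrupt IDs are supported; the nr_spis value below is purely illustrative:

    #include <stdio.h>

    int main(void)
    {
            /* VGIC_NR_PRIVATE_IRQS is 32 (16 SGIs + 16 PPIs); nr_spis is made up */
            unsigned int nr_spis = 480;
            unsigned int value = nr_spis + 32;

            /* GICD_TYPER.ITLinesNumber: 32 * (N + 1) interrupt IDs */
            value = (value >> 5) - 1;
            printf("ITLinesNumber = %u\n", value);  /* 15, i.e. 512 INTIDs */
            return 0;
    }
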
+ static void vgic_mmio_write_v3_misc(struct kvm_vcpu *vcpu,
+                                   gpa_t addr, unsigned int len,
+                                   unsigned long val)
+ {
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       switch (addr & 0x0c) {
+       case GICD_CTLR: {
+               bool was_enabled, is_hwsgi;
+               mutex_lock(&vcpu->kvm->lock);
+               was_enabled = dist->enabled;
+               is_hwsgi = dist->nassgireq;
+               dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
+               /* Not a GICv4.1? No HW SGIs */
+               if (!kvm_vgic_global_state.has_gicv4_1)
+                       val &= ~GICD_CTLR_nASSGIreq;
+               /* Dist stays enabled? nASSGIreq is RO */
+               if (was_enabled && dist->enabled) {
+                       val &= ~GICD_CTLR_nASSGIreq;
+                       val |= FIELD_PREP(GICD_CTLR_nASSGIreq, is_hwsgi);
+               }
+               /* Switching HW SGIs? */
+               dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+               if (is_hwsgi != dist->nassgireq)
+                       vgic_v4_configure_vsgis(vcpu->kvm);
+               if (kvm_vgic_global_state.has_gicv4_1 &&
+                   was_enabled != dist->enabled)
+                       kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_RELOAD_GICv4);
+               else if (!was_enabled && dist->enabled)
+                       vgic_kick_vcpus(vcpu->kvm);
+               mutex_unlock(&vcpu->kvm->lock);
+               break;
+       }
+       case GICD_TYPER:
+       case GICD_TYPER2:
+       case GICD_IIDR:
+               /* This is at best for documentation purposes... */
+               return;
+       }
+ }
+ static int vgic_mmio_uaccess_write_v3_misc(struct kvm_vcpu *vcpu,
+                                          gpa_t addr, unsigned int len,
+                                          unsigned long val)
+ {
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       switch (addr & 0x0c) {
+       case GICD_TYPER2:
+       case GICD_IIDR:
+               if (val != vgic_mmio_read_v3_misc(vcpu, addr, len))
+                       return -EINVAL;
+               return 0;
+       case GICD_CTLR:
+               /* Not a GICv4.1? No HW SGIs */
+               if (!kvm_vgic_global_state.has_gicv4_1)
+                       val &= ~GICD_CTLR_nASSGIreq;
+               dist->enabled = val & GICD_CTLR_ENABLE_SS_G1;
+               dist->nassgireq = val & GICD_CTLR_nASSGIreq;
+               return 0;
+       }
+       vgic_mmio_write_v3_misc(vcpu, addr, len, val);
+       return 0;
+ }
+ static unsigned long vgic_mmio_read_irouter(struct kvm_vcpu *vcpu,
+                                           gpa_t addr, unsigned int len)
+ {
+       int intid = VGIC_ADDR_TO_INTID(addr, 64);
+       struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+       unsigned long ret = 0;
+       if (!irq)
+               return 0;
+       /* The upper word is RAZ for us. */
+       if (!(addr & 4))
+               ret = extract_bytes(READ_ONCE(irq->mpidr), addr & 7, len);
+       vgic_put_irq(vcpu->kvm, irq);
+       return ret;
+ }
+ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
+                                   gpa_t addr, unsigned int len,
+                                   unsigned long val)
+ {
+       int intid = VGIC_ADDR_TO_INTID(addr, 64);
+       struct vgic_irq *irq;
+       unsigned long flags;
+       /* The upper word is WI for us since we don't implement Aff3. */
+       if (addr & 4)
+               return;
+       irq = vgic_get_irq(vcpu->kvm, NULL, intid);
+       if (!irq)
+               return;
+       raw_spin_lock_irqsave(&irq->irq_lock, flags);
+       /* We only care about and preserve Aff0, Aff1 and Aff2. */
+       irq->mpidr = val & GENMASK(23, 0);
+       irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
+       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+       vgic_put_irq(vcpu->kvm, irq);
+ }
+ static unsigned long vgic_mmio_read_v3r_ctlr(struct kvm_vcpu *vcpu,
+                                            gpa_t addr, unsigned int len)
+ {
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       return vgic_cpu->lpis_enabled ? GICR_CTLR_ENABLE_LPIS : 0;
+ }
+ static void vgic_mmio_write_v3r_ctlr(struct kvm_vcpu *vcpu,
+                                    gpa_t addr, unsigned int len,
+                                    unsigned long val)
+ {
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       bool was_enabled = vgic_cpu->lpis_enabled;
+       if (!vgic_has_its(vcpu->kvm))
+               return;
+       vgic_cpu->lpis_enabled = val & GICR_CTLR_ENABLE_LPIS;
+       if (was_enabled && !vgic_cpu->lpis_enabled) {
+               vgic_flush_pending_lpis(vcpu);
+               vgic_its_invalidate_cache(vcpu->kvm);
+       }
+       if (!was_enabled && vgic_cpu->lpis_enabled)
+               vgic_enable_lpis(vcpu);
+ }
+ static unsigned long vgic_mmio_read_v3r_typer(struct kvm_vcpu *vcpu,
+                                             gpa_t addr, unsigned int len)
+ {
+       unsigned long mpidr = kvm_vcpu_get_mpidr_aff(vcpu);
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_redist_region *rdreg = vgic_cpu->rdreg;
+       int target_vcpu_id = vcpu->vcpu_id;
+       gpa_t last_rdist_typer = rdreg->base + GICR_TYPER +
+                       (rdreg->free_index - 1) * KVM_VGIC_V3_REDIST_SIZE;
+       u64 value;
+       value = (u64)(mpidr & GENMASK(23, 0)) << 32;
+       value |= ((target_vcpu_id & 0xffff) << 8);
+       if (addr == last_rdist_typer)
+               value |= GICR_TYPER_LAST;
+       if (vgic_has_its(vcpu->kvm))
+               value |= GICR_TYPER_PLPIS;
+       return extract_bytes(value, addr & 7, len);
+ }
+ static unsigned long vgic_mmio_read_v3r_iidr(struct kvm_vcpu *vcpu,
+                                            gpa_t addr, unsigned int len)
+ {
+       return (PRODUCT_ID_KVM << 24) | (IMPLEMENTER_ARM << 0);
+ }
+ static unsigned long vgic_mmio_read_v3_idregs(struct kvm_vcpu *vcpu,
+                                             gpa_t addr, unsigned int len)
+ {
+       switch (addr & 0xffff) {
+       case GICD_PIDR2:
+               /* report a GICv3 compliant implementation */
+               return 0x3b;
+       }
+       return 0;
+ }
+ static unsigned long vgic_v3_uaccess_read_pending(struct kvm_vcpu *vcpu,
+                                                 gpa_t addr, unsigned int len)
+ {
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       u32 value = 0;
+       int i;
+       /*
+        * The pending state of an interrupt is latched in the pending_latch
+        * variable. Userspace will save and restore the pending state and
+        * line_level separately.
++       * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst
+        * for handling of ISPENDR and ICPENDR.
+        */
+       for (i = 0; i < len * 8; i++) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+               bool state = irq->pending_latch;
+               if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
+                       int err;
+                       err = irq_get_irqchip_state(irq->host_irq,
+                                                   IRQCHIP_STATE_PENDING,
+                                                   &state);
+                       WARN_ON(err);
+               }
+               if (state)
+                       value |= (1U << i);
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+       return value;
+ }
+ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
+                                        gpa_t addr, unsigned int len,
+                                        unsigned long val)
+ {
+       u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
+       int i;
+       unsigned long flags;
+       for (i = 0; i < len * 8; i++) {
+               struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               if (test_bit(i, &val)) {
+                       /*
+                        * pending_latch is set irrespective of irq type
+                        * (level or edge) so that the VM does not have to
+                        * restore the irq config before the pending info.
+                        */
+                       irq->pending_latch = true;
+                       vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+               } else {
+                       irq->pending_latch = false;
+                       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+               }
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+       return 0;
+ }
+ /* We want to avoid outer shareable. */
+ u64 vgic_sanitise_shareability(u64 field)
+ {
+       switch (field) {
+       case GIC_BASER_OuterShareable:
+               return GIC_BASER_InnerShareable;
+       default:
+               return field;
+       }
+ }
+ /* Avoid any inner non-cacheable mapping. */
+ u64 vgic_sanitise_inner_cacheability(u64 field)
+ {
+       switch (field) {
+       case GIC_BASER_CACHE_nCnB:
+       case GIC_BASER_CACHE_nC:
+               return GIC_BASER_CACHE_RaWb;
+       default:
+               return field;
+       }
+ }
+ /* Non-cacheable or same-as-inner are OK. */
+ u64 vgic_sanitise_outer_cacheability(u64 field)
+ {
+       switch (field) {
+       case GIC_BASER_CACHE_SameAsInner:
+       case GIC_BASER_CACHE_nC:
+               return field;
+       default:
+               return GIC_BASER_CACHE_nC;
+       }
+ }
+ u64 vgic_sanitise_field(u64 reg, u64 field_mask, int field_shift,
+                       u64 (*sanitise_fn)(u64))
+ {
+       u64 field = (reg & field_mask) >> field_shift;
+       field = sanitise_fn(field) << field_shift;
+       return (reg & ~field_mask) | field;
+ }
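
vgic_sanitise_field() is a small combinator: it extracts one field, runs it through a sanitise callback, and writes the result back. A standalone sketch of that pattern, with an illustrative 2-bit shareability field placed at bits [11:10] (the real PENDBASER/PROPBASER masks are what the functions below pass in):

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative shareability encodings (2-bit field) */
    enum { NON_SHAREABLE = 0, INNER_SHAREABLE = 1, OUTER_SHAREABLE = 2 };

    /* mirrors vgic_sanitise_shareability(): demote OuterShareable to Inner */
    static uint64_t sanitise_shareability(uint64_t field)
    {
            return field == OUTER_SHAREABLE ? INNER_SHAREABLE : field;
    }

    /* mirrors vgic_sanitise_field(): pull a field out, sanitise it, put it back */
    static uint64_t sanitise_field(uint64_t reg, uint64_t mask, int shift,
                                   uint64_t (*fn)(uint64_t))
    {
            uint64_t field = (reg & mask) >> shift;

            return (reg & ~mask) | (fn(field) << shift);
    }

    int main(void)
    {
            /* assume the shareability field lives at bits [11:10] for this demo */
            uint64_t reg = (uint64_t)OUTER_SHAREABLE << 10;

            reg = sanitise_field(reg, 0xc00, 10, sanitise_shareability);
            printf("%llx\n", (unsigned long long)reg);  /* 400: InnerShareable */
            return 0;
    }
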
+ #define PROPBASER_RES0_MASK                                           \
+       (GENMASK_ULL(63, 59) | GENMASK_ULL(55, 52) | GENMASK_ULL(6, 5))
+ #define PENDBASER_RES0_MASK                                           \
+       (BIT_ULL(63) | GENMASK_ULL(61, 59) | GENMASK_ULL(55, 52) |      \
+        GENMASK_ULL(15, 12) | GENMASK_ULL(6, 0))
+ static u64 vgic_sanitise_pendbaser(u64 reg)
+ {
+       reg = vgic_sanitise_field(reg, GICR_PENDBASER_SHAREABILITY_MASK,
+                                 GICR_PENDBASER_SHAREABILITY_SHIFT,
+                                 vgic_sanitise_shareability);
+       reg = vgic_sanitise_field(reg, GICR_PENDBASER_INNER_CACHEABILITY_MASK,
+                                 GICR_PENDBASER_INNER_CACHEABILITY_SHIFT,
+                                 vgic_sanitise_inner_cacheability);
+       reg = vgic_sanitise_field(reg, GICR_PENDBASER_OUTER_CACHEABILITY_MASK,
+                                 GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT,
+                                 vgic_sanitise_outer_cacheability);
+       reg &= ~PENDBASER_RES0_MASK;
+       return reg;
+ }
+ static u64 vgic_sanitise_propbaser(u64 reg)
+ {
+       reg = vgic_sanitise_field(reg, GICR_PROPBASER_SHAREABILITY_MASK,
+                                 GICR_PROPBASER_SHAREABILITY_SHIFT,
+                                 vgic_sanitise_shareability);
+       reg = vgic_sanitise_field(reg, GICR_PROPBASER_INNER_CACHEABILITY_MASK,
+                                 GICR_PROPBASER_INNER_CACHEABILITY_SHIFT,
+                                 vgic_sanitise_inner_cacheability);
+       reg = vgic_sanitise_field(reg, GICR_PROPBASER_OUTER_CACHEABILITY_MASK,
+                                 GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT,
+                                 vgic_sanitise_outer_cacheability);
+       reg &= ~PROPBASER_RES0_MASK;
+       return reg;
+ }
+ static unsigned long vgic_mmio_read_propbase(struct kvm_vcpu *vcpu,
+                                            gpa_t addr, unsigned int len)
+ {
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       return extract_bytes(dist->propbaser, addr & 7, len);
+ }
+ static void vgic_mmio_write_propbase(struct kvm_vcpu *vcpu,
+                                    gpa_t addr, unsigned int len,
+                                    unsigned long val)
+ {
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       u64 old_propbaser, propbaser;
+       /* Storing a value with LPIs already enabled is undefined */
+       if (vgic_cpu->lpis_enabled)
+               return;
+       do {
+               old_propbaser = READ_ONCE(dist->propbaser);
+               propbaser = old_propbaser;
+               propbaser = update_64bit_reg(propbaser, addr & 4, len, val);
+               propbaser = vgic_sanitise_propbaser(propbaser);
+       } while (cmpxchg64(&dist->propbaser, old_propbaser,
+                          propbaser) != old_propbaser);
+ }
+ static unsigned long vgic_mmio_read_pendbase(struct kvm_vcpu *vcpu,
+                                            gpa_t addr, unsigned int len)
+ {
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       u64 value = vgic_cpu->pendbaser;
+       value &= ~GICR_PENDBASER_PTZ;
+       return extract_bytes(value, addr & 7, len);
+ }
+ static void vgic_mmio_write_pendbase(struct kvm_vcpu *vcpu,
+                                    gpa_t addr, unsigned int len,
+                                    unsigned long val)
+ {
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       u64 old_pendbaser, pendbaser;
+       /* Storing a value with LPIs already enabled is undefined */
+       if (vgic_cpu->lpis_enabled)
+               return;
+       do {
+               old_pendbaser = READ_ONCE(vgic_cpu->pendbaser);
+               pendbaser = old_pendbaser;
+               pendbaser = update_64bit_reg(pendbaser, addr & 4, len, val);
+               pendbaser = vgic_sanitise_pendbaser(pendbaser);
+       } while (cmpxchg64(&vgic_cpu->pendbaser, old_pendbaser,
+                          pendbaser) != old_pendbaser);
+ }
+ /*
+  * The GICv3 per-IRQ registers are split to control PPIs and SGIs in the
+  * redistributors, while SPIs are covered by registers in the distributor
+  * block. Trying to set private IRQs in this block gets ignored.
+  * We take some special care here to fix the calculation of the register
+  * offset.
+  */
+ #define REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(off, rd, wr, ur, uw, bpi, acc) \
+       {                                                               \
+               .reg_offset = off,                                      \
+               .bits_per_irq = bpi,                                    \
+               .len = (bpi * VGIC_NR_PRIVATE_IRQS) / 8,                \
+               .access_flags = acc,                                    \
+               .read = vgic_mmio_read_raz,                             \
+               .write = vgic_mmio_write_wi,                            \
+       }, {                                                            \
+               .reg_offset = off + (bpi * VGIC_NR_PRIVATE_IRQS) / 8,   \
+               .bits_per_irq = bpi,                                    \
+               .len = (bpi * (1024 - VGIC_NR_PRIVATE_IRQS)) / 8,       \
+               .access_flags = acc,                                    \
+               .read = rd,                                             \
+               .write = wr,                                            \
+               .uaccess_read = ur,                                     \
+               .uaccess_write = uw,                                    \
+       }
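
To make the offset fix in the macro above concrete: the first (bpi * 32) / 8 bytes of each register bank cover the 32 private INTIDs and are handled as RAZ/WI here, and the real distributor entry starts immediately after. A rough worked example, assuming the 1024-INTID space hard-coded in the macro and the bpi values used in the table below:

    #include <stdio.h>

    int main(void)
    {
            unsigned int bpi[] = { 1, 2, 8, 64 };   /* IGROUPR, ICFGR, IPRIORITYR, IROUTER */

            for (unsigned int i = 0; i < 4; i++) {
                    unsigned int priv_len = bpi[i] * 32 / 8;            /* RAZ/WI stub */
                    unsigned int spi_len  = bpi[i] * (1024 - 32) / 8;   /* real handler */

                    printf("bpi=%2u: private bytes=%4u, SPI bytes=%5u\n",
                           bpi[i], priv_len, spi_len);
            }
            return 0;
    }
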
+ static const struct vgic_register_region vgic_v3_dist_registers[] = {
+       REGISTER_DESC_WITH_LENGTH_UACCESS(GICD_CTLR,
+               vgic_mmio_read_v3_misc, vgic_mmio_write_v3_misc,
+               NULL, vgic_mmio_uaccess_write_v3_misc,
+               16, VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(GICD_STATUSR,
+               vgic_mmio_read_rao, vgic_mmio_write_wi, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGROUPR,
+               vgic_mmio_read_group, vgic_mmio_write_group, NULL, NULL, 1,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISENABLER,
+               vgic_mmio_read_enable, vgic_mmio_write_senable,
+               NULL, vgic_uaccess_write_senable, 1,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICENABLER,
+               vgic_mmio_read_enable, vgic_mmio_write_cenable,
+               NULL, vgic_uaccess_write_cenable, 1,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISPENDR,
+               vgic_mmio_read_pending, vgic_mmio_write_spending,
+               vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 1,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICPENDR,
+               vgic_mmio_read_pending, vgic_mmio_write_cpending,
+               vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 1,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ISACTIVER,
+               vgic_mmio_read_active, vgic_mmio_write_sactive,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 1,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICACTIVER,
+               vgic_mmio_read_active, vgic_mmio_write_cactive,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive,
+               1, VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IPRIORITYR,
+               vgic_mmio_read_priority, vgic_mmio_write_priority, NULL, NULL,
+               8, VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ITARGETSR,
+               vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 8,
+               VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_ICFGR,
+               vgic_mmio_read_config, vgic_mmio_write_config, NULL, NULL, 2,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IGRPMODR,
+               vgic_mmio_read_raz, vgic_mmio_write_wi, NULL, NULL, 1,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_BITS_PER_IRQ_SHARED(GICD_IROUTER,
+               vgic_mmio_read_irouter, vgic_mmio_write_irouter, NULL, NULL, 64,
+               VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(GICD_IDREGS,
+               vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
+               VGIC_ACCESS_32bit),
+ };
+ static const struct vgic_register_region vgic_v3_rd_registers[] = {
+       /* RD_base registers */
+       REGISTER_DESC_WITH_LENGTH(GICR_CTLR,
+               vgic_mmio_read_v3r_ctlr, vgic_mmio_write_v3r_ctlr, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(GICR_STATUSR,
+               vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(GICR_IIDR,
+               vgic_mmio_read_v3r_iidr, vgic_mmio_write_wi, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(GICR_TYPER,
+               vgic_mmio_read_v3r_typer, vgic_mmio_write_wi, 8,
+               VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(GICR_WAKER,
+               vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(GICR_PROPBASER,
+               vgic_mmio_read_propbase, vgic_mmio_write_propbase, 8,
+               VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(GICR_PENDBASER,
+               vgic_mmio_read_pendbase, vgic_mmio_write_pendbase, 8,
+               VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(GICR_IDREGS,
+               vgic_mmio_read_v3_idregs, vgic_mmio_write_wi, 48,
+               VGIC_ACCESS_32bit),
+       /* SGI_base registers */
+       REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGROUPR0,
+               vgic_mmio_read_group, vgic_mmio_write_group, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISENABLER0,
+               vgic_mmio_read_enable, vgic_mmio_write_senable,
+               NULL, vgic_uaccess_write_senable, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICENABLER0,
+               vgic_mmio_read_enable, vgic_mmio_write_cenable,
+               NULL, vgic_uaccess_write_cenable, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISPENDR0,
+               vgic_mmio_read_pending, vgic_mmio_write_spending,
+               vgic_v3_uaccess_read_pending, vgic_v3_uaccess_write_pending, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICPENDR0,
+               vgic_mmio_read_pending, vgic_mmio_write_cpending,
+               vgic_mmio_read_raz, vgic_mmio_uaccess_write_wi, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ISACTIVER0,
+               vgic_mmio_read_active, vgic_mmio_write_sactive,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_sactive, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH_UACCESS(SZ_64K + GICR_ICACTIVER0,
+               vgic_mmio_read_active, vgic_mmio_write_cactive,
+               vgic_uaccess_read_active, vgic_mmio_uaccess_write_cactive, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IPRIORITYR0,
+               vgic_mmio_read_priority, vgic_mmio_write_priority, 32,
+               VGIC_ACCESS_32bit | VGIC_ACCESS_8bit),
+       REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_ICFGR0,
+               vgic_mmio_read_config, vgic_mmio_write_config, 8,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_IGRPMODR0,
+               vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+               VGIC_ACCESS_32bit),
+       REGISTER_DESC_WITH_LENGTH(SZ_64K + GICR_NSACR,
+               vgic_mmio_read_raz, vgic_mmio_write_wi, 4,
+               VGIC_ACCESS_32bit),
+ };
+ unsigned int vgic_v3_init_dist_iodev(struct vgic_io_device *dev)
+ {
+       dev->regions = vgic_v3_dist_registers;
+       dev->nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
+       kvm_iodevice_init(&dev->dev, &kvm_io_gic_ops);
+       return SZ_64K;
+ }
+ /**
+  * vgic_register_redist_iodev - register a single redist iodev
+  * @vcpu:    The VCPU to which the redistributor belongs
+  *
+  * Register a KVM iodev for this VCPU's redistributor using the address
+  * provided.
+  *
+  * Return 0 on success, -ERRNO otherwise.
+  */
+ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
+ {
+       struct kvm *kvm = vcpu->kvm;
+       struct vgic_dist *vgic = &kvm->arch.vgic;
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
+       struct vgic_redist_region *rdreg;
+       gpa_t rd_base;
+       int ret;
+       if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
+               return 0;
+       /*
+        * We may be creating VCPUs before having set the base address for the
+        * redistributor region, in which case we will come back to this
+        * function for all VCPUs when the base address is set.  Just return
+        * without doing any work for now.
+        */
+       rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
+       if (!rdreg)
+               return 0;
+       if (!vgic_v3_check_base(kvm))
+               return -EINVAL;
+       vgic_cpu->rdreg = rdreg;
+       rd_base = rdreg->base + rdreg->free_index * KVM_VGIC_V3_REDIST_SIZE;
+       kvm_iodevice_init(&rd_dev->dev, &kvm_io_gic_ops);
+       rd_dev->base_addr = rd_base;
+       rd_dev->iodev_type = IODEV_REDIST;
+       rd_dev->regions = vgic_v3_rd_registers;
+       rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
+       rd_dev->redist_vcpu = vcpu;
+       mutex_lock(&kvm->slots_lock);
+       ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
+                                     2 * SZ_64K, &rd_dev->dev);
+       mutex_unlock(&kvm->slots_lock);
+       if (ret)
+               return ret;
+       rdreg->free_index++;
+       return 0;
+ }
+ static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
+ {
+       struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
+       kvm_io_bus_unregister_dev(vcpu->kvm, KVM_MMIO_BUS, &rd_dev->dev);
+ }
+ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
+ {
+       struct kvm_vcpu *vcpu;
+       int c, ret = 0;
+       kvm_for_each_vcpu(c, vcpu, kvm) {
+               ret = vgic_register_redist_iodev(vcpu);
+               if (ret)
+                       break;
+       }
+       if (ret) {
+               /* The current c failed, so we start with the previous one. */
+               mutex_lock(&kvm->slots_lock);
+               for (c--; c >= 0; c--) {
+                       vcpu = kvm_get_vcpu(kvm, c);
+                       vgic_unregister_redist_iodev(vcpu);
+               }
+               mutex_unlock(&kvm->slots_lock);
+       }
+       return ret;
+ }
+ /**
+  * vgic_v3_insert_redist_region - Insert a new redistributor region
+  *
+  * Performs various checks before inserting the rdist region in the list.
+  * Those tests depend on whether the size of the rdist region is known
+  * (ie. count != 0). The list is sorted by rdist region index.
+  *
+  * @kvm: kvm handle
+  * @index: redist region index
+  * @base: base of the new rdist region
+  * @count: number of redistributors the region is made of (0 in the old style
+  * single region, whose size is inferred from the number of vcpus)
+  *
+  * Return 0 on success, < 0 otherwise
+  */
+ static int vgic_v3_insert_redist_region(struct kvm *kvm, uint32_t index,
+                                       gpa_t base, uint32_t count)
+ {
+       struct vgic_dist *d = &kvm->arch.vgic;
+       struct vgic_redist_region *rdreg;
+       struct list_head *rd_regions = &d->rd_regions;
+       size_t size = count * KVM_VGIC_V3_REDIST_SIZE;
+       int ret;
+       /* single rdist region already set? */
+       if (!count && !list_empty(rd_regions))
+               return -EINVAL;
+       /* cross the end of memory? */
+       if (base + size < base)
+               return -EINVAL;
+       if (list_empty(rd_regions)) {
+               if (index != 0)
+                       return -EINVAL;
+       } else {
+               rdreg = list_last_entry(rd_regions,
+                                       struct vgic_redist_region, list);
+               if (index != rdreg->index + 1)
+                       return -EINVAL;
+               /* Cannot add an explicitly sized region after the legacy region */
+               if (!rdreg->count)
+                       return -EINVAL;
+       }
+       /*
+        * For legacy single-region redistributor regions (!count),
+        * check that the redistributor region does not overlap with the
+        * distributor's address space.
+        */
+       if (!count && !IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
+               vgic_dist_overlap(kvm, base, size))
+               return -EINVAL;
+       /* collision with any other rdist region? */
+       if (vgic_v3_rdist_overlap(kvm, base, size))
+               return -EINVAL;
+       rdreg = kzalloc(sizeof(*rdreg), GFP_KERNEL);
+       if (!rdreg)
+               return -ENOMEM;
+       rdreg->base = VGIC_ADDR_UNDEF;
+       ret = vgic_check_ioaddr(kvm, &rdreg->base, base, SZ_64K);
+       if (ret)
+               goto free;
+       rdreg->base = base;
+       rdreg->count = count;
+       rdreg->free_index = 0;
+       rdreg->index = index;
+       list_add_tail(&rdreg->list, rd_regions);
+       return 0;
+ free:
+       kfree(rdreg);
+       return ret;
+ }
+ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
+ {
+       int ret;
+       ret = vgic_v3_insert_redist_region(kvm, index, addr, count);
+       if (ret)
+               return ret;
+       /*
+        * Register iodevs for each existing VCPU.  Adding more VCPUs
+        * afterwards will register the iodevs when needed.
+        */
+       ret = vgic_register_all_redist_iodevs(kvm);
+       if (ret)
+               return ret;
+       return 0;
+ }
+ int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr)
+ {
+       const struct vgic_register_region *region;
+       struct vgic_io_device iodev;
+       struct vgic_reg_attr reg_attr;
+       struct kvm_vcpu *vcpu;
+       gpa_t addr;
+       int ret;
+       ret = vgic_v3_parse_attr(dev, attr, &reg_attr);
+       if (ret)
+               return ret;
+       vcpu = reg_attr.vcpu;
+       addr = reg_attr.addr;
+       switch (attr->group) {
+       case KVM_DEV_ARM_VGIC_GRP_DIST_REGS:
+               iodev.regions = vgic_v3_dist_registers;
+               iodev.nr_regions = ARRAY_SIZE(vgic_v3_dist_registers);
+               iodev.base_addr = 0;
+               break;
+       case KVM_DEV_ARM_VGIC_GRP_REDIST_REGS:{
+               iodev.regions = vgic_v3_rd_registers;
+               iodev.nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
+               iodev.base_addr = 0;
+               break;
+       }
+       case KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS: {
+               u64 reg, id;
+               id = (attr->attr & KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK);
+               return vgic_v3_has_cpu_sysregs_attr(vcpu, 0, id, &reg);
+       }
+       default:
+               return -ENXIO;
+       }
+       /* We only support aligned 32-bit accesses. */
+       if (addr & 3)
+               return -ENXIO;
+       region = vgic_get_mmio_region(vcpu, &iodev, addr, sizeof(u32));
+       if (!region)
+               return -ENXIO;
+       return 0;
+ }
+ /*
+  * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI
+  * generation register ICC_SGI1R_EL1) with a given VCPU.
+  * If the VCPU's MPIDR matches, return the level0 affinity, otherwise
+  * return -1.
+  */
+ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu)
+ {
+       unsigned long affinity;
+       int level0;
+       /*
+        * Split the current VCPU's MPIDR into affinity level 0 and the
+        * rest as this is what we have to compare against.
+        */
+       affinity = kvm_vcpu_get_mpidr_aff(vcpu);
+       level0 = MPIDR_AFFINITY_LEVEL(affinity, 0);
+       affinity &= ~MPIDR_LEVEL_MASK;
+       /* bail out if the upper three levels don't match */
+       if (sgi_aff != affinity)
+               return -1;
+       /* Is this VCPU's bit set in the mask? */
+       if (!(sgi_cpu_mask & BIT(level0)))
+               return -1;
+       return level0;
+ }
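
A small standalone illustration of the match performed by match_mpidr() above; the MPIDR values are made up and the helper is a simplified mirror, not the kernel function:

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors match_mpidr(): @sgi_aff holds Aff3..Aff1, @mask is the Aff0 bitmap */
    static int match_mpidr(uint64_t sgi_aff, uint16_t mask, uint64_t vcpu_mpidr)
    {
            int level0 = vcpu_mpidr & 0xff;             /* Aff0 */
            uint64_t affinity = vcpu_mpidr & ~0xffULL;  /* Aff1 and up */

            if (sgi_aff != affinity)
                    return -1;
            if (!(mask & (1U << level0)))
                    return -1;
            return level0;
    }

    int main(void)
    {
            /* vCPU with Aff1 = 1, Aff0 = 3 (cluster 1, core 3) */
            uint64_t mpidr = (1 << 8) | 3;

            /* SGI aimed at cluster 1, target list with bit 3 set */
            printf("%d\n", match_mpidr(1 << 8, 1 << 3, mpidr));  /* 3 */
            printf("%d\n", match_mpidr(2 << 8, 1 << 3, mpidr));  /* -1: wrong cluster */
            return 0;
    }
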
+ /*
+  * The ICC_SGI* registers encode the affinity differently from the MPIDR,
+  * so provide a wrapper to use the existing defines to isolate a certain
+  * affinity level.
+  */
+ #define SGI_AFFINITY_LEVEL(reg, level) \
+       ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \
+       >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
+ /**
+  * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs
+  * @vcpu: The VCPU requesting a SGI
+  * @reg: The value written into ICC_{ASGI1,SGI0,SGI1}R by that VCPU
+  * @allow_group1: Does the sysreg access allow generation of G1 SGIs
+  *
+  * With GICv3 (and ARE=1) CPUs trigger SGIs by writing to a system register.
+  * This will trap in sys_regs.c and call this function.
+  * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the
+  * target processors as well as a bitmask of 16 Aff0 CPUs.
+  * If the interrupt routing mode bit is not set, we iterate over all VCPUs to
+  * check for matching ones. If this bit is set, we signal all, but not the
+  * calling VCPU.
+  */
+ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
+ {
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_vcpu *c_vcpu;
+       u16 target_cpus;
+       u64 mpidr;
+       int sgi, c;
+       int vcpu_id = vcpu->vcpu_id;
+       bool broadcast;
+       unsigned long flags;
+       sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
+       broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
+       target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT;
+       mpidr = SGI_AFFINITY_LEVEL(reg, 3);
+       mpidr |= SGI_AFFINITY_LEVEL(reg, 2);
+       mpidr |= SGI_AFFINITY_LEVEL(reg, 1);
+       /*
+        * We iterate over all VCPUs to find the MPIDRs matching the request.
+        * If we have handled one CPU, we clear its bit to detect early
+        * if we are already finished. This avoids iterating through all
+        * VCPUs when most of the time we just signal a single VCPU.
+        */
+       kvm_for_each_vcpu(c, c_vcpu, kvm) {
+               struct vgic_irq *irq;
+               /* Exit early if we have dealt with all requested CPUs */
+               if (!broadcast && target_cpus == 0)
+                       break;
+               /* Don't signal the calling VCPU */
+               if (broadcast && c == vcpu_id)
+                       continue;
+               if (!broadcast) {
+                       int level0;
+                       level0 = match_mpidr(mpidr, target_cpus, c_vcpu);
+                       if (level0 == -1)
+                               continue;
+                       /* remove this matching VCPU from the mask */
+                       target_cpus &= ~BIT(level0);
+               }
+               irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
+               raw_spin_lock_irqsave(&irq->irq_lock, flags);
+               /*
+                * An access targeting Group0 SGIs can only generate
+                * those, while an access targeting Group1 SGIs can
+                * generate interrupts of either group.
+                */
+               if (!irq->group || allow_group1) {
+                       if (!irq->hw) {
+                               irq->pending_latch = true;
+                               vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
+                       } else {
+                               /* HW SGI? Ask the GIC to inject it */
+                               int err;
+                               err = irq_set_irqchip_state(irq->host_irq,
+                                                           IRQCHIP_STATE_PENDING,
+                                                           true);
+                               WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
+                               raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+                       }
+               } else {
+                       raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
+               }
+               vgic_put_irq(vcpu->kvm, irq);
+       }
+ }
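
For orientation, this is roughly how an ICC_SGI1R_EL1 value breaks down into the pieces consumed above. The field positions follow the GICv3 architecture (TargetList [15:0], Aff1 [23:16], INTID [27:24], Aff2 [39:32], IRM [40], Aff3 [55:48]); the shifts are written out by hand here rather than using the kernel's ICC_SGI1R_* defines, and the register value is invented:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* SGI 5, Aff2 = 0, Aff1 = 1, target list = CPUs 0 and 3, IRM = 0 */
            uint64_t reg = (5ULL << 24) | (1ULL << 16) | 0x9;

            unsigned int sgi     = (reg >> 24) & 0xf;
            unsigned int aff1    = (reg >> 16) & 0xff;
            unsigned int targets = reg & 0xffff;
            int broadcast        = (reg >> 40) & 1;

            printf("sgi=%u aff1=%u targets=%#x broadcast=%d\n",
                   sgi, aff1, targets, broadcast);
            /* prints: sgi=5 aff1=1 targets=0x9 broadcast=0 */
            return 0;
    }
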
+ int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+                        int offset, u32 *val)
+ {
+       struct vgic_io_device dev = {
+               .regions = vgic_v3_dist_registers,
+               .nr_regions = ARRAY_SIZE(vgic_v3_dist_registers),
+       };
+       return vgic_uaccess(vcpu, &dev, is_write, offset, val);
+ }
+ int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+                          int offset, u32 *val)
+ {
+       struct vgic_io_device rd_dev = {
+               .regions = vgic_v3_rd_registers,
+               .nr_regions = ARRAY_SIZE(vgic_v3_rd_registers),
+       };
+       return vgic_uaccess(vcpu, &rd_dev, is_write, offset, val);
+ }
+ int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+                                   u32 intid, u64 *val)
+ {
+       if (intid % 32)
+               return -EINVAL;
+       if (is_write)
+               vgic_write_irq_line_level_info(vcpu, intid, *val);
+       else
+               *val = vgic_read_irq_line_level_info(vcpu, intid);
+       return 0;
+ }
index 0000000000000000000000000000000000000000,769e4802645ee8ef30f3761ba7c0ddc73bf652b2..64fcd75111108c6b7b3b5b74e434339138c44a57
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,321 +1,321 @@@
 - * As per Documentation/virt/kvm/devices/arm-vgic-v3.txt,
+ /* SPDX-License-Identifier: GPL-2.0-only */
+ /*
+  * Copyright (C) 2015, 2016 ARM Ltd.
+  */
+ #ifndef __KVM_ARM_VGIC_NEW_H__
+ #define __KVM_ARM_VGIC_NEW_H__
+ #include <linux/irqchip/arm-gic-common.h>
+ #define PRODUCT_ID_KVM                0x4b    /* ASCII code K */
+ #define IMPLEMENTER_ARM               0x43b
+ #define VGIC_ADDR_UNDEF               (-1)
+ #define IS_VGIC_ADDR_UNDEF(_x)  ((_x) == VGIC_ADDR_UNDEF)
+ #define INTERRUPT_ID_BITS_SPIS        10
+ #define INTERRUPT_ID_BITS_ITS 16
+ #define VGIC_PRI_BITS         5
+ #define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)
+ #define VGIC_AFFINITY_0_SHIFT 0
+ #define VGIC_AFFINITY_0_MASK (0xffUL << VGIC_AFFINITY_0_SHIFT)
+ #define VGIC_AFFINITY_1_SHIFT 8
+ #define VGIC_AFFINITY_1_MASK (0xffUL << VGIC_AFFINITY_1_SHIFT)
+ #define VGIC_AFFINITY_2_SHIFT 16
+ #define VGIC_AFFINITY_2_MASK (0xffUL << VGIC_AFFINITY_2_SHIFT)
+ #define VGIC_AFFINITY_3_SHIFT 24
+ #define VGIC_AFFINITY_3_MASK (0xffUL << VGIC_AFFINITY_3_SHIFT)
+ #define VGIC_AFFINITY_LEVEL(reg, level) \
+       ((((reg) & VGIC_AFFINITY_## level ##_MASK) \
+       >> VGIC_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level))
+ /*
+  * Userspace encodes the affinity differently from the MPIDR.
+  * The macro below converts the vgic userspace format to the MPIDR reg format.
+  */
+ #define VGIC_TO_MPIDR(val) (VGIC_AFFINITY_LEVEL(val, 0) | \
+                           VGIC_AFFINITY_LEVEL(val, 1) | \
+                           VGIC_AFFINITY_LEVEL(val, 2) | \
+                           VGIC_AFFINITY_LEVEL(val, 3))
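
A worked example of VGIC_TO_MPIDR() with an invented userspace value, assuming the arm64 MPIDR layout of Aff0 [7:0], Aff1 [15:8], Aff2 [23:16], Aff3 [39:32] that MPIDR_LEVEL_SHIFT() produces:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* userspace packs Aff3..Aff0 into bytes 3..0: Aff3=1, Aff2=2, Aff1=3, Aff0=4 */
            uint32_t uval = 0x01020304;

            uint64_t mpidr = ((uint64_t)((uval >> 24) & 0xff) << 32) |  /* Aff3 */
                             (((uval >> 16) & 0xff) << 16) |            /* Aff2 */
                             (((uval >>  8) & 0xff) <<  8) |            /* Aff1 */
                              (uval         & 0xff);                    /* Aff0 */

            printf("%#llx\n", (unsigned long long)mpidr);  /* 0x100020304 */
            return 0;
    }
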
+ /*
 - * As per Documentation/virt/kvm/devices/arm-vgic-its.txt,
++ * As per Documentation/virt/kvm/devices/arm-vgic-v3.rst,
+  * below macros are defined for CPUREG encoding.
+  */
+ #define KVM_REG_ARM_VGIC_SYSREG_OP0_MASK   0x000000000000c000
+ #define KVM_REG_ARM_VGIC_SYSREG_OP0_SHIFT  14
+ #define KVM_REG_ARM_VGIC_SYSREG_OP1_MASK   0x0000000000003800
+ #define KVM_REG_ARM_VGIC_SYSREG_OP1_SHIFT  11
+ #define KVM_REG_ARM_VGIC_SYSREG_CRN_MASK   0x0000000000000780
+ #define KVM_REG_ARM_VGIC_SYSREG_CRN_SHIFT  7
+ #define KVM_REG_ARM_VGIC_SYSREG_CRM_MASK   0x0000000000000078
+ #define KVM_REG_ARM_VGIC_SYSREG_CRM_SHIFT  3
+ #define KVM_REG_ARM_VGIC_SYSREG_OP2_MASK   0x0000000000000007
+ #define KVM_REG_ARM_VGIC_SYSREG_OP2_SHIFT  0
+ #define KVM_DEV_ARM_VGIC_SYSREG_MASK (KVM_REG_ARM_VGIC_SYSREG_OP0_MASK | \
+                                     KVM_REG_ARM_VGIC_SYSREG_OP1_MASK | \
+                                     KVM_REG_ARM_VGIC_SYSREG_CRN_MASK | \
+                                     KVM_REG_ARM_VGIC_SYSREG_CRM_MASK | \
+                                     KVM_REG_ARM_VGIC_SYSREG_OP2_MASK)
+ /*
++ * As per Documentation/virt/kvm/devices/arm-vgic-its.rst,
+  * below macros are defined for ITS table entry encoding.
+  */
+ #define KVM_ITS_CTE_VALID_SHIFT               63
+ #define KVM_ITS_CTE_VALID_MASK                BIT_ULL(63)
+ #define KVM_ITS_CTE_RDBASE_SHIFT      16
+ #define KVM_ITS_CTE_ICID_MASK         GENMASK_ULL(15, 0)
+ #define KVM_ITS_ITE_NEXT_SHIFT                48
+ #define KVM_ITS_ITE_PINTID_SHIFT      16
+ #define KVM_ITS_ITE_PINTID_MASK               GENMASK_ULL(47, 16)
+ #define KVM_ITS_ITE_ICID_MASK         GENMASK_ULL(15, 0)
+ #define KVM_ITS_DTE_VALID_SHIFT               63
+ #define KVM_ITS_DTE_VALID_MASK                BIT_ULL(63)
+ #define KVM_ITS_DTE_NEXT_SHIFT                49
+ #define KVM_ITS_DTE_NEXT_MASK         GENMASK_ULL(62, 49)
+ #define KVM_ITS_DTE_ITTADDR_SHIFT     5
+ #define KVM_ITS_DTE_ITTADDR_MASK      GENMASK_ULL(48, 5)
+ #define KVM_ITS_DTE_SIZE_MASK         GENMASK_ULL(4, 0)
+ #define KVM_ITS_L1E_VALID_MASK                BIT_ULL(63)
+ /* we only support 64 kB translation table page size */
+ #define KVM_ITS_L1E_ADDR_MASK         GENMASK_ULL(51, 16)
+ #define KVM_VGIC_V3_RDIST_INDEX_MASK  GENMASK_ULL(11, 0)
+ #define KVM_VGIC_V3_RDIST_FLAGS_MASK  GENMASK_ULL(15, 12)
+ #define KVM_VGIC_V3_RDIST_FLAGS_SHIFT 12
+ #define KVM_VGIC_V3_RDIST_BASE_MASK   GENMASK_ULL(51, 16)
+ #define KVM_VGIC_V3_RDIST_COUNT_MASK  GENMASK_ULL(63, 52)
+ #define KVM_VGIC_V3_RDIST_COUNT_SHIFT 52
+ #ifdef CONFIG_DEBUG_SPINLOCK
+ #define DEBUG_SPINLOCK_BUG_ON(p) BUG_ON(p)
+ #else
+ #define DEBUG_SPINLOCK_BUG_ON(p)
+ #endif
+ /* Requires the irq_lock to be held by the caller. */
+ static inline bool irq_is_pending(struct vgic_irq *irq)
+ {
+       if (irq->config == VGIC_CONFIG_EDGE)
+               return irq->pending_latch;
+       else
+               return irq->pending_latch || irq->line_level;
+ }
+ static inline bool vgic_irq_is_mapped_level(struct vgic_irq *irq)
+ {
+       return irq->config == VGIC_CONFIG_LEVEL && irq->hw;
+ }
+ static inline int vgic_irq_get_lr_count(struct vgic_irq *irq)
+ {
+       /* Account for the active state as an interrupt */
+       if (vgic_irq_is_sgi(irq->intid) && irq->source)
+               return hweight8(irq->source) + irq->active;
+       return irq_is_pending(irq) || irq->active;
+ }
+ static inline bool vgic_irq_is_multi_sgi(struct vgic_irq *irq)
+ {
+       return vgic_irq_get_lr_count(irq) > 1;
+ }
+ /*
+  * This struct provides an intermediate representation of the fields contained
+  * in the GICH_VMCR and ICH_VMCR registers, such that code exporting the GIC
+  * state to userspace can generate either GICv2 or GICv3 CPU interface
+  * registers regardless of the hardware backed GIC used.
+  */
+ struct vgic_vmcr {
+       u32     grpen0;
+       u32     grpen1;
+       u32     ackctl;
+       u32     fiqen;
+       u32     cbpr;
+       u32     eoim;
+       u32     abpr;
+       u32     bpr;
+       u32     pmr;  /* Priority mask field in the GICC_PMR and
+                      * ICC_PMR_EL1 priority field format */
+ };
+ struct vgic_reg_attr {
+       struct kvm_vcpu *vcpu;
+       gpa_t addr;
+ };
+ int vgic_v3_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
+                      struct vgic_reg_attr *reg_attr);
+ int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr,
+                      struct vgic_reg_attr *reg_attr);
+ const struct vgic_register_region *
+ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev,
+                    gpa_t addr, int len);
+ struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
+                             u32 intid);
+ void __vgic_put_lpi_locked(struct kvm *kvm, struct vgic_irq *irq);
+ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq);
+ bool vgic_get_phys_line_level(struct vgic_irq *irq);
+ void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending);
+ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active);
+ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
+                          unsigned long flags);
+ void vgic_kick_vcpus(struct kvm *kvm);
+ int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr,
+                     phys_addr_t addr, phys_addr_t alignment);
+ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu);
+ void vgic_v2_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
+ void vgic_v2_clear_lr(struct kvm_vcpu *vcpu, int lr);
+ void vgic_v2_set_underflow(struct kvm_vcpu *vcpu);
+ void vgic_v2_set_npie(struct kvm_vcpu *vcpu);
+ int vgic_v2_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
+ int vgic_v2_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+                        int offset, u32 *val);
+ int vgic_v2_cpuif_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+                         int offset, u32 *val);
+ void vgic_v2_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ void vgic_v2_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ void vgic_v2_enable(struct kvm_vcpu *vcpu);
+ int vgic_v2_probe(const struct gic_kvm_info *info);
+ int vgic_v2_map_resources(struct kvm *kvm);
+ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
+                            enum vgic_type);
+ void vgic_v2_init_lrs(void);
+ void vgic_v2_load(struct kvm_vcpu *vcpu);
+ void vgic_v2_put(struct kvm_vcpu *vcpu);
+ void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
+ void vgic_v2_save_state(struct kvm_vcpu *vcpu);
+ void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+ static inline void vgic_get_irq_kref(struct vgic_irq *irq)
+ {
+       if (irq->intid < VGIC_MIN_LPI)
+               return;
+       kref_get(&irq->refcount);
+ }
+ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu);
+ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr);
+ void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr);
+ void vgic_v3_set_underflow(struct kvm_vcpu *vcpu);
+ void vgic_v3_set_npie(struct kvm_vcpu *vcpu);
+ void vgic_v3_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ void vgic_v3_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ void vgic_v3_enable(struct kvm_vcpu *vcpu);
+ int vgic_v3_probe(const struct gic_kvm_info *info);
+ int vgic_v3_map_resources(struct kvm *kvm);
+ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
+ int vgic_v3_save_pending_tables(struct kvm *kvm);
+ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
+ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
+ bool vgic_v3_check_base(struct kvm *kvm);
+ void vgic_v3_load(struct kvm_vcpu *vcpu);
+ void vgic_v3_put(struct kvm_vcpu *vcpu);
+ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
+ bool vgic_has_its(struct kvm *kvm);
+ int kvm_vgic_register_its_device(void);
+ void vgic_enable_lpis(struct kvm_vcpu *vcpu);
+ void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu);
+ int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
+ int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr);
+ int vgic_v3_dist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+                        int offset, u32 *val);
+ int vgic_v3_redist_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+                        int offset, u32 *val);
+ int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+                        u64 id, u64 *val);
+ int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
+                               u64 *reg);
+ int vgic_v3_line_level_info_uaccess(struct kvm_vcpu *vcpu, bool is_write,
+                                   u32 intid, u64 *val);
+ int kvm_register_vgic_device(unsigned long type);
+ void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ void vgic_get_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
+ int vgic_lazy_init(struct kvm *kvm);
+ int vgic_init(struct kvm *kvm);
+ void vgic_debug_init(struct kvm *kvm);
+ void vgic_debug_destroy(struct kvm *kvm);
+ bool lock_all_vcpus(struct kvm *kvm);
+ void unlock_all_vcpus(struct kvm *kvm);
+ static inline int vgic_v3_max_apr_idx(struct kvm_vcpu *vcpu)
+ {
+       struct vgic_cpu *cpu_if = &vcpu->arch.vgic_cpu;
+       /*
+        * num_pri_bits is initialized with the HW supported value.
+        * We can safely rely on num_pri_bits even if the VM has not
+        * restored ICC_CTLR_EL1 before restoring the APnR registers.
+        */
+       switch (cpu_if->num_pri_bits) {
+       case 7: return 3;
+       case 6: return 1;
+       default: return 0;
+       }
+ }
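
A rough rationale for the switch above, assuming one active-priority bit per priority level and 32-bit APR registers as in the GICv3 architecture; the arithmetic is illustrative rather than taken from the kernel:

    #include <stdio.h>

    int main(void)
    {
            /* one active-priority bit per priority level, 32 bits per APR register */
            for (unsigned int bits = 5; bits <= 7; bits++) {
                    unsigned int levels = 1u << bits;
                    unsigned int regs   = levels / 32;

                    printf("%u priority bits -> %3u levels -> %u APR regs (max idx %u)\n",
                           bits, levels, regs, regs - 1);
            }
            return 0;
    }
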
+ static inline bool
+ vgic_v3_redist_region_full(struct vgic_redist_region *region)
+ {
+       if (!region->count)
+               return false;
+       return (region->free_index >= region->count);
+ }
+ struct vgic_redist_region *vgic_v3_rdist_free_slot(struct list_head *rdregs);
+ static inline size_t
+ vgic_v3_rd_region_size(struct kvm *kvm, struct vgic_redist_region *rdreg)
+ {
+       if (!rdreg->count)
+               return atomic_read(&kvm->online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
+       else
+               return rdreg->count * KVM_VGIC_V3_REDIST_SIZE;
+ }
+ struct vgic_redist_region *vgic_v3_rdist_region_from_index(struct kvm *kvm,
+                                                          u32 index);
+ bool vgic_v3_rdist_overlap(struct kvm *kvm, gpa_t base, size_t size);
+ static inline bool vgic_dist_overlap(struct kvm *kvm, gpa_t base, size_t size)
+ {
+       struct vgic_dist *d = &kvm->arch.vgic;
+       return (base + size > d->vgic_dist_base) &&
+               (base < d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE);
+ }
+ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr);
+ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+                        u32 devid, u32 eventid, struct vgic_irq **irq);
+ struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
+ int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi);
+ void vgic_lpi_translation_cache_init(struct kvm *kvm);
+ void vgic_lpi_translation_cache_destroy(struct kvm *kvm);
+ void vgic_its_invalidate_cache(struct kvm *kvm);
+ bool vgic_supports_direct_msis(struct kvm *kvm);
+ int vgic_v4_init(struct kvm *kvm);
+ void vgic_v4_teardown(struct kvm *kvm);
+ void vgic_v4_configure_vsgis(struct kvm *kvm);
+ #endif
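
The inline helpers above are small enough to exercise outside the kernel. Below is a standalone C sketch of the redistributor-region sizing and APR-index logic; KVM_VGIC_V3_REDIST_SIZE is restated locally as 2 * 64 KiB (one RD frame plus one SGI frame per vCPU), an assumption here rather than a value pulled from the kernel headers.

#include <stdio.h>

#define SZ_64K                  0x10000UL
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)	/* assumed: RD + SGI frame per vCPU */

/* mirrors vgic_v3_rd_region_size(): count == 0 means the region scales with
 * the number of online vCPUs, otherwise it is a fixed number of slots */
static unsigned long rd_region_size(unsigned int count, unsigned int online_vcpus)
{
	return (count ? count : online_vcpus) * KVM_VGIC_V3_REDIST_SIZE;
}

/* mirrors vgic_v3_max_apr_idx(): 7 priority bits -> APR0..APR3 (index 3),
 * 6 bits -> APR0..APR1 (index 1), otherwise only APR0 */
static int max_apr_idx(unsigned int num_pri_bits)
{
	switch (num_pri_bits) {
	case 7: return 3;
	case 6: return 1;
	default: return 0;
	}
}

int main(void)
{
	printf("8 vCPUs, count=0 -> %lu KiB of redistributor space\n",
	       rd_region_size(0, 8) >> 10);		/* 1024 KiB */
	printf("num_pri_bits=7   -> max APR index %d\n", max_apr_idx(7));
	return 0;
}
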
diff --combined arch/mips/kvm/mips.c
index 2261c63174c5f4dc0d700c7e8fa67f525dc16b32,99ed08aff31eb35bdae54f1153b8dda432fff085..3b0148c99c0dfac480f1579a60a7dc3429a8b06c
  #define VECTORSPACING 0x100   /* for EI/VI mode */
  #endif
  
- #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
  struct kvm_stats_debugfs_item debugfs_entries[] = {
-       { "wait",         VCPU_STAT(wait_exits),         KVM_STAT_VCPU },
-       { "cache",        VCPU_STAT(cache_exits),        KVM_STAT_VCPU },
-       { "signal",       VCPU_STAT(signal_exits),       KVM_STAT_VCPU },
-       { "interrupt",    VCPU_STAT(int_exits),          KVM_STAT_VCPU },
-       { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
-       { "tlbmod",       VCPU_STAT(tlbmod_exits),       KVM_STAT_VCPU },
-       { "tlbmiss_ld",   VCPU_STAT(tlbmiss_ld_exits),   KVM_STAT_VCPU },
-       { "tlbmiss_st",   VCPU_STAT(tlbmiss_st_exits),   KVM_STAT_VCPU },
-       { "addrerr_st",   VCPU_STAT(addrerr_st_exits),   KVM_STAT_VCPU },
-       { "addrerr_ld",   VCPU_STAT(addrerr_ld_exits),   KVM_STAT_VCPU },
-       { "syscall",      VCPU_STAT(syscall_exits),      KVM_STAT_VCPU },
-       { "resvd_inst",   VCPU_STAT(resvd_inst_exits),   KVM_STAT_VCPU },
-       { "break_inst",   VCPU_STAT(break_inst_exits),   KVM_STAT_VCPU },
-       { "trap_inst",    VCPU_STAT(trap_inst_exits),    KVM_STAT_VCPU },
-       { "msa_fpe",      VCPU_STAT(msa_fpe_exits),      KVM_STAT_VCPU },
-       { "fpe",          VCPU_STAT(fpe_exits),          KVM_STAT_VCPU },
-       { "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
-       { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+       VCPU_STAT("wait", wait_exits),
+       VCPU_STAT("cache", cache_exits),
+       VCPU_STAT("signal", signal_exits),
+       VCPU_STAT("interrupt", int_exits),
+       VCPU_STAT("cop_unusable", cop_unusable_exits),
+       VCPU_STAT("tlbmod", tlbmod_exits),
+       VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits),
+       VCPU_STAT("tlbmiss_st", tlbmiss_st_exits),
+       VCPU_STAT("addrerr_st", addrerr_st_exits),
+       VCPU_STAT("addrerr_ld", addrerr_ld_exits),
+       VCPU_STAT("syscall", syscall_exits),
+       VCPU_STAT("resvd_inst", resvd_inst_exits),
+       VCPU_STAT("break_inst", break_inst_exits),
+       VCPU_STAT("trap_inst", trap_inst_exits),
+       VCPU_STAT("msa_fpe", msa_fpe_exits),
+       VCPU_STAT("fpe", fpe_exits),
+       VCPU_STAT("msa_disabled", msa_disabled_exits),
+       VCPU_STAT("flush_dcache", flush_dcache_exits),
  #ifdef CONFIG_KVM_MIPS_VZ
-       { "vz_gpsi",      VCPU_STAT(vz_gpsi_exits),      KVM_STAT_VCPU },
-       { "vz_gsfc",      VCPU_STAT(vz_gsfc_exits),      KVM_STAT_VCPU },
-       { "vz_hc",        VCPU_STAT(vz_hc_exits),        KVM_STAT_VCPU },
-       { "vz_grr",       VCPU_STAT(vz_grr_exits),       KVM_STAT_VCPU },
-       { "vz_gva",       VCPU_STAT(vz_gva_exits),       KVM_STAT_VCPU },
-       { "vz_ghfc",      VCPU_STAT(vz_ghfc_exits),      KVM_STAT_VCPU },
-       { "vz_gpa",       VCPU_STAT(vz_gpa_exits),       KVM_STAT_VCPU },
-       { "vz_resvd",     VCPU_STAT(vz_resvd_exits),     KVM_STAT_VCPU },
+       VCPU_STAT("vz_gpsi", vz_gpsi_exits),
+       VCPU_STAT("vz_gsfc", vz_gsfc_exits),
+       VCPU_STAT("vz_hc", vz_hc_exits),
+       VCPU_STAT("vz_grr", vz_grr_exits),
+       VCPU_STAT("vz_gva", vz_gva_exits),
+       VCPU_STAT("vz_ghfc", vz_ghfc_exits),
+       VCPU_STAT("vz_gpa", vz_gpa_exits),
+       VCPU_STAT("vz_resvd", vz_resvd_exits),
  #endif
-       { "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
-       { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
-       { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
-       { "halt_wakeup",  VCPU_STAT(halt_wakeup),        KVM_STAT_VCPU },
+       VCPU_STAT("halt_successful_poll", halt_successful_poll),
+       VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
+       VCPU_STAT("halt_poll_invalid", halt_poll_invalid),
+       VCPU_STAT("halt_wakeup", halt_wakeup),
+       VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns),
+       VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns),
        {NULL}
  };
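
The table above now uses a VCPU_STAT(name, field) macro shared across architectures instead of the per-arch offsetof() wrapper deleted at the top of this hunk; the real definition lives in the common KVM headers. The offset-based pattern itself is easy to show standalone; the struct, macro, and field names below are local stand-ins, not the kernel's.

#include <stddef.h>
#include <stdio.h>

struct vcpu_stat { unsigned long wait_exits, cache_exits, halt_wakeup; };
struct vcpu { int id; struct vcpu_stat stat; };

/* illustrative only: a (name, offset-into-vcpu) pair, resolved at runtime */
#define VCPU_STAT(n, x) { n, offsetof(struct vcpu, stat.x) }

static const struct { const char *name; size_t off; } entries[] = {
	VCPU_STAT("wait", wait_exits),
	VCPU_STAT("cache", cache_exits),
	VCPU_STAT("halt_wakeup", halt_wakeup),
	{ NULL, 0 }
};

int main(void)
{
	struct vcpu v = { .stat = { .wait_exits = 3, .halt_wakeup = 7 } };
	int i;

	for (i = 0; entries[i].name; i++)
		printf("%-12s %lu\n", entries[i].name,
		       *(unsigned long *)((char *)&v + entries[i].off));
	return 0;
}
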
  
@@@ -80,13 -81,13 +81,13 @@@ bool kvm_trace_guest_mode_change
  
  int kvm_guest_mode_change_trace_reg(void)
  {
 -      kvm_trace_guest_mode_change = 1;
 +      kvm_trace_guest_mode_change = true;
        return 0;
  }
  
  void kvm_guest_mode_change_trace_unreg(void)
  {
 -      kvm_trace_guest_mode_change = 0;
 +      kvm_trace_guest_mode_change = false;
  }
  
  /*
@@@ -284,8 -285,7 +285,7 @@@ static enum hrtimer_restart kvm_mips_co
        kvm_mips_callbacks->queue_timer_int(vcpu);
  
        vcpu->arch.wait = 0;
-       if (swq_has_sleeper(&vcpu->wq))
-               swake_up_one(&vcpu->wq);
+       rcuwait_wake_up(&vcpu->wait);
  
        return kvm_mips_count_timeout(vcpu);
  }
@@@ -439,8 -439,9 +439,9 @@@ int kvm_arch_vcpu_ioctl_set_guest_debug
        return -ENOIOCTLCMD;
  }
  
- int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
  {
+       struct kvm_run *run = vcpu->run;
        int r = -EINTR;
  
        vcpu_load(vcpu);
@@@ -511,8 -512,7 +512,7 @@@ int kvm_vcpu_ioctl_interrupt(struct kvm
  
        dvcpu->arch.wait = 0;
  
-       if (swq_has_sleeper(&dvcpu->wq))
-               swake_up_one(&dvcpu->wq);
+       rcuwait_wake_up(&dvcpu->wait);
  
        return 0;
  }
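
Both wake-up sites in this file drop the swait sequence (check for a sleeper, then swake_up_one) in favour of a single rcuwait_wake_up() on vcpu->wait, matching the generic swait-to-rcuwait switch in this pull. A hedged kernel-style fragment of the pairing is sketched below; rcuwait_wake_up() is the call visible in the diff, while the waiter side is assumed to use rcuwait_wait_event(), which in this series also takes a task state argument. Not a drop-in copy of any kvm function.

#include <linux/rcuwait.h>
#include <linux/sched.h>

static bool done;
static struct rcuwait w;	/* zero-initialised, same effect as rcuwait_init() */

/* waiter side, e.g. a vCPU thread blocking until work arrives */
static void waiter(void)
{
	rcuwait_wait_event(&w, READ_ONCE(done), TASK_INTERRUPTIBLE);
}

/* waker side, e.g. a timer callback or interrupt injection path */
static void waker(void)
{
	WRITE_ONCE(done, true);
	rcuwait_wake_up(&w);	/* no separate "has sleeper" check needed */
}
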
diff --combined arch/x86/entry/entry_32.S
index ac232f456396c8eb1e0ae6ae043df7db3b963a06,8ba0985f501655da3bbad4b0d3d1b7bc2197f3c7..a5eed844e948bd853fbb22428a0c3d76d306c80b
@@@ -816,7 -816,7 +816,7 @@@ SYM_CODE_START(ret_from_fork
  
        /* kernel thread */
  1:    movl    %edi, %eax
 -      CALL_NOSPEC %ebx
 +      CALL_NOSPEC ebx
        /*
         * A kernel thread is allowed to return here after successfully
         * calling do_execve().  Exit to userspace to complete the execve()
@@@ -1501,7 -1501,7 +1501,7 @@@ SYM_CODE_START_LOCAL_NOALIGN(common_exc
  
        TRACE_IRQS_OFF
        movl    %esp, %eax                      # pt_regs pointer
 -      CALL_NOSPEC %edi
 +      CALL_NOSPEC edi
        jmp     ret_from_exception
  SYM_CODE_END(common_exception_read_cr2)
  
@@@ -1522,7 -1522,7 +1522,7 @@@ SYM_CODE_START_LOCAL_NOALIGN(common_exc
  
        TRACE_IRQS_OFF
        movl    %esp, %eax                      # pt_regs pointer
 -      CALL_NOSPEC %edi
 +      CALL_NOSPEC edi
        jmp     ret_from_exception
  SYM_CODE_END(common_exception)
  
@@@ -1536,6 -1536,7 +1536,6 @@@ SYM_CODE_START(debug
        jmp     common_exception
  SYM_CODE_END(debug)
  
 -#ifdef CONFIG_DOUBLEFAULT
  SYM_CODE_START(double_fault)
  1:
        /*
        hlt
        jmp 1b
  SYM_CODE_END(double_fault)
 -#endif
  
  /*
   * NMI is doubly nasty.  It can happen on the first instruction of
@@@ -1691,14 -1693,6 +1691,6 @@@ SYM_CODE_START(general_protection
        jmp     common_exception
  SYM_CODE_END(general_protection)
  
- #ifdef CONFIG_KVM_GUEST
- SYM_CODE_START(async_page_fault)
-       ASM_CLAC
-       pushl   $do_async_page_fault
-       jmp     common_exception_read_cr2
- SYM_CODE_END(async_page_fault)
- #endif
  SYM_CODE_START(rewind_stack_do_exit)
        /* Prevent any naive code from trying to unwind to our caller. */
        xorl    %ebp, %ebp
diff --combined arch/x86/entry/entry_64.S
index 64fe3d82157e631c6940056f8bf7efecfa454716,9ab3ea6d02fc64c0726536ef9b90509c5e39c081..eead1e2bebd5c12f1bda7539760ae728b268a09b
@@@ -348,7 -348,7 +348,7 @@@ SYM_CODE_START(ret_from_fork
        /* kernel thread */
        UNWIND_HINT_EMPTY
        movq    %r12, %rdi
 -      CALL_NOSPEC %rbx
 +      CALL_NOSPEC rbx
        /*
         * A kernel thread is allowed to return here after successfully
         * calling do_execve().  Exit to userspace to complete the execve()
@@@ -1202,10 -1202,6 +1202,6 @@@ idtentry xendebug              do_debug                has_error_
  idtentry general_protection   do_general_protection   has_error_code=1
  idtentry page_fault           do_page_fault           has_error_code=1        read_cr2=1
  
- #ifdef CONFIG_KVM_GUEST
- idtentry async_page_fault     do_async_page_fault     has_error_code=1        read_cr2=1
- #endif
  #ifdef CONFIG_X86_MCE
  idtentry machine_check                do_mce                  has_error_code=0        paranoid=1
  #endif
diff --combined arch/x86/include/asm/hyperv-tlfs.h
index 4e91f6118d5d9727c07794e0f075b6493e223b13,53ef6b7bd3802526f7edbc0831af78d21b249d8f..7a4d2062385cc889956857dea49d3cf51adda8f8
  
  #include <linux/types.h>
  #include <asm/page.h>
 -
 -/*
 - * While not explicitly listed in the TLFS, Hyper-V always runs with a page size
 - * of 4096. These definitions are used when communicating with Hyper-V using
 - * guest physical pages and guest physical page addresses, since the guest page
 - * size may not be 4096 on all architectures.
 - */
 -#define HV_HYP_PAGE_SHIFT      12
 -#define HV_HYP_PAGE_SIZE       BIT(HV_HYP_PAGE_SHIFT)
 -#define HV_HYP_PAGE_MASK       (~(HV_HYP_PAGE_SIZE - 1))
 -
  /*
   * The below CPUID leaves are present if VersionAndFeatures.HypervisorPresent
   * is set by CPUID(HvCpuIdFunctionVersionAndFeatures).
  #define HYPERV_CPUID_MAX                      0x4000ffff
  
  /*
 - * Feature identification. EAX indicates which features are available
 - * to the partition based upon the current partition privileges.
 - * These are HYPERV_CPUID_FEATURES.EAX bits.
 + * Aliases for Group A features that have X64 in the name.
 + * On x86/x64 these are HYPERV_CPUID_FEATURES.EAX bits.
   */
  
 -/* VP Runtime (HV_X64_MSR_VP_RUNTIME) available */
 -#define HV_X64_MSR_VP_RUNTIME_AVAILABLE               BIT(0)
 -/* Partition Reference Counter (HV_X64_MSR_TIME_REF_COUNT) available*/
 -#define HV_MSR_TIME_REF_COUNT_AVAILABLE               BIT(1)
 -/*
 - * Basic SynIC MSRs (HV_X64_MSR_SCONTROL through HV_X64_MSR_EOM
 - * and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15) available
 - */
 -#define HV_X64_MSR_SYNIC_AVAILABLE            BIT(2)
 -/*
 - * Synthetic Timer MSRs (HV_X64_MSR_STIMER0_CONFIG through
 - * HV_X64_MSR_STIMER3_COUNT) available
 - */
 -#define HV_MSR_SYNTIMER_AVAILABLE             BIT(3)
 -/*
 - * APIC access MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and HV_X64_MSR_TPR)
 - * are available
 - */
 -#define HV_X64_MSR_APIC_ACCESS_AVAILABLE      BIT(4)
 -/* Hypercall MSRs (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL) available*/
 -#define HV_X64_MSR_HYPERCALL_AVAILABLE                BIT(5)
 -/* Access virtual processor index MSR (HV_X64_MSR_VP_INDEX) available*/
 -#define HV_X64_MSR_VP_INDEX_AVAILABLE         BIT(6)
 -/* Virtual system reset MSR (HV_X64_MSR_RESET) is available*/
 -#define HV_X64_MSR_RESET_AVAILABLE            BIT(7)
 -/*
 - * Access statistics pages MSRs (HV_X64_MSR_STATS_PARTITION_RETAIL_PAGE,
 - * HV_X64_MSR_STATS_PARTITION_INTERNAL_PAGE, HV_X64_MSR_STATS_VP_RETAIL_PAGE,
 - * HV_X64_MSR_STATS_VP_INTERNAL_PAGE) available
 - */
 -#define HV_X64_MSR_STAT_PAGES_AVAILABLE               BIT(8)
 -/* Partition reference TSC MSR is available */
 -#define HV_MSR_REFERENCE_TSC_AVAILABLE                BIT(9)
 -/* Partition Guest IDLE MSR is available */
 -#define HV_X64_MSR_GUEST_IDLE_AVAILABLE               BIT(10)
 -/*
 - * There is a single feature flag that signifies if the partition has access
 - * to MSRs with local APIC and TSC frequencies.
 - */
 -#define HV_X64_ACCESS_FREQUENCY_MSRS          BIT(11)
 -/* AccessReenlightenmentControls privilege */
 -#define HV_X64_ACCESS_REENLIGHTENMENT         BIT(13)
 -/* AccessTscInvariantControls privilege */
 -#define HV_X64_ACCESS_TSC_INVARIANT           BIT(15)
 +#define HV_X64_MSR_VP_RUNTIME_AVAILABLE               \
 +              HV_MSR_VP_RUNTIME_AVAILABLE
 +#define HV_X64_MSR_SYNIC_AVAILABLE            \
 +              HV_MSR_SYNIC_AVAILABLE
 +#define HV_X64_MSR_APIC_ACCESS_AVAILABLE      \
 +              HV_MSR_APIC_ACCESS_AVAILABLE
 +#define HV_X64_MSR_HYPERCALL_AVAILABLE                \
 +              HV_MSR_HYPERCALL_AVAILABLE
 +#define HV_X64_MSR_VP_INDEX_AVAILABLE         \
 +              HV_MSR_VP_INDEX_AVAILABLE
 +#define HV_X64_MSR_RESET_AVAILABLE            \
 +              HV_MSR_RESET_AVAILABLE
 +#define HV_X64_MSR_GUEST_IDLE_AVAILABLE               \
 +              HV_MSR_GUEST_IDLE_AVAILABLE
 +#define HV_X64_ACCESS_FREQUENCY_MSRS          \
 +              HV_ACCESS_FREQUENCY_MSRS
 +#define HV_X64_ACCESS_REENLIGHTENMENT         \
 +              HV_ACCESS_REENLIGHTENMENT
 +#define HV_X64_ACCESS_TSC_INVARIANT           \
 +              HV_ACCESS_TSC_INVARIANT
  
  /*
 - * Feature identification: indicates which flags were specified at partition
 - * creation. The format is the same as the partition creation flag structure
 - * defined in section Partition Creation Flags.
 - * These are HYPERV_CPUID_FEATURES.EBX bits.
 + * Aliases for Group B features that have X64 in the name.
 + * On x86/x64 these are HYPERV_CPUID_FEATURES.EBX bits.
   */
 -#define HV_X64_CREATE_PARTITIONS              BIT(0)
 -#define HV_X64_ACCESS_PARTITION_ID            BIT(1)
 -#define HV_X64_ACCESS_MEMORY_POOL             BIT(2)
 -#define HV_X64_ADJUST_MESSAGE_BUFFERS         BIT(3)
 -#define HV_X64_POST_MESSAGES                  BIT(4)
 -#define HV_X64_SIGNAL_EVENTS                  BIT(5)
 -#define HV_X64_CREATE_PORT                    BIT(6)
 -#define HV_X64_CONNECT_PORT                   BIT(7)
 -#define HV_X64_ACCESS_STATS                   BIT(8)
 -#define HV_X64_DEBUGGING                      BIT(11)
 -#define HV_X64_CPU_POWER_MANAGEMENT           BIT(12)
 +#define HV_X64_POST_MESSAGES          HV_POST_MESSAGES
 +#define HV_X64_SIGNAL_EVENTS          HV_SIGNAL_EVENTS
  
  /*
 - * Feature identification. EDX indicates which miscellaneous features
 - * are available to the partition.
 - * These are HYPERV_CPUID_FEATURES.EDX bits.
 + * Group D Features.  The bit assignments are custom to each architecture.
 + * On x86/x64 these are HYPERV_CPUID_FEATURES.EDX bits.
   */
  /* The MWAIT instruction is available (per section MONITOR / MWAIT) */
  #define HV_X64_MWAIT_AVAILABLE                                BIT(0)
  #define HV_FEATURE_FREQUENCY_MSRS_AVAILABLE           BIT(8)
  /* Crash MSR available */
  #define HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE          BIT(10)
+ /* Support for debug MSRs available */
+ #define HV_FEATURE_DEBUG_MSRS_AVAILABLE                       BIT(11)
  /* stimer Direct Mode is available */
  #define HV_STIMER_DIRECT_MODE_AVAILABLE                       BIT(19)
  
   * processor, except for virtual processors that are reported as sibling SMT
   * threads.
   */
 -#define HV_X64_NO_NONARCH_CORESHARING                  BIT(18)
 +#define HV_X64_NO_NONARCH_CORESHARING                 BIT(18)
  
  /* Nested features. These are HYPERV_CPUID_NESTED_FEATURES.EAX bits. */
  #define HV_X64_NESTED_DIRECT_FLUSH                    BIT(17)
@@@ -247,6 -297,43 +249,6 @@@ union hv_x64_msr_hypercall_contents 
        } __packed;
  };
  
 -/*
 - * TSC page layout.
 - */
 -struct ms_hyperv_tsc_page {
 -      volatile u32 tsc_sequence;
 -      u32 reserved1;
 -      volatile u64 tsc_scale;
 -      volatile s64 tsc_offset;
 -      u64 reserved2[509];
 -}  __packed;
 -
 -/*
 - * The guest OS needs to register the guest ID with the hypervisor.
 - * The guest ID is a 64 bit entity and the structure of this ID is
 - * specified in the Hyper-V specification:
 - *
 - * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
 - *
 - * While the current guideline does not specify how Linux guest ID(s)
 - * need to be generated, our plan is to publish the guidelines for
 - * Linux and other guest operating systems that currently are hosted
 - * on Hyper-V. The implementation here conforms to this yet
 - * unpublished guidelines.
 - *
 - *
 - * Bit(s)
 - * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
 - * 62:56 - Os Type; Linux is 0x100
 - * 55:48 - Distro specific identification
 - * 47:16 - Linux kernel version number
 - * 15:0  - Distro specific identification
 - *
 - *
 - */
 -
 -#define HV_LINUX_VENDOR_ID              0x8100
 -
  struct hv_reenlightenment_control {
        __u64 vector:8;
        __u64 reserved1:8;
@@@ -270,12 -357,34 +272,12 @@@ struct hv_tsc_emulation_status 
  #define HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK        \
                (~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
  
 -/*
 - * Crash notification (HV_X64_MSR_CRASH_CTL) flags.
 - */
 -#define HV_CRASH_CTL_CRASH_NOTIFY_MSG         BIT_ULL(62)
 -#define HV_CRASH_CTL_CRASH_NOTIFY             BIT_ULL(63)
  #define HV_X64_MSR_CRASH_PARAMS               \
                (1 + (HV_X64_MSR_CRASH_P4 - HV_X64_MSR_CRASH_P0))
  
  #define HV_IPI_LOW_VECTOR     0x10
  #define HV_IPI_HIGH_VECTOR    0xff
  
 -/* Declare the various hypercall operations. */
 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE    0x0002
 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST     0x0003
 -#define HVCALL_NOTIFY_LONG_SPIN_WAIT          0x0008
 -#define HVCALL_SEND_IPI                               0x000b
 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX  0x0013
 -#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX   0x0014
 -#define HVCALL_SEND_IPI_EX                    0x0015
 -#define HVCALL_POST_MESSAGE                   0x005c
 -#define HVCALL_SIGNAL_EVENT                   0x005d
 -#define HVCALL_POST_DEBUG_DATA                        0x0069
 -#define HVCALL_RETRIEVE_DEBUG_DATA            0x006a
 -#define HVCALL_RESET_DEBUG_SESSION            0x006b
 -#define HVCALL_RETARGET_INTERRUPT             0x007e
 -#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
 -#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
 -
  #define HV_X64_MSR_VP_ASSIST_PAGE_ENABLE      0x00000001
  #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT       12
  #define HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK        \
  #define HV_X64_MSR_TSC_REFERENCE_ENABLE               0x00000001
  #define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT        12
  
 -#define HV_PROCESSOR_POWER_STATE_C0           0
 -#define HV_PROCESSOR_POWER_STATE_C1           1
 -#define HV_PROCESSOR_POWER_STATE_C2           2
 -#define HV_PROCESSOR_POWER_STATE_C3           3
 -
 -#define HV_FLUSH_ALL_PROCESSORS                       BIT(0)
 -#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES   BIT(1)
 -#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY     BIT(2)
 -#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT    BIT(3)
 -
 -enum HV_GENERIC_SET_FORMAT {
 -      HV_GENERIC_SET_SPARSE_4K,
 -      HV_GENERIC_SET_ALL,
 -};
 -
 -#define HV_PARTITION_ID_SELF                    ((u64)-1)
 -
 -#define HV_HYPERCALL_RESULT_MASK      GENMASK_ULL(15, 0)
 -#define HV_HYPERCALL_FAST_BIT         BIT(16)
 -#define HV_HYPERCALL_VARHEAD_OFFSET   17
 -#define HV_HYPERCALL_REP_COMP_OFFSET  32
 -#define HV_HYPERCALL_REP_COMP_MASK    GENMASK_ULL(43, 32)
 -#define HV_HYPERCALL_REP_START_OFFSET 48
 -#define HV_HYPERCALL_REP_START_MASK   GENMASK_ULL(59, 48)
 -
 -/* hypercall status code */
 -#define HV_STATUS_SUCCESS                     0
 -#define HV_STATUS_INVALID_HYPERCALL_CODE      2
 -#define HV_STATUS_INVALID_HYPERCALL_INPUT     3
 -#define HV_STATUS_INVALID_ALIGNMENT           4
 -#define HV_STATUS_INVALID_PARAMETER           5
 -#define HV_STATUS_OPERATION_DENIED            8
 -#define HV_STATUS_INSUFFICIENT_MEMORY         11
 -#define HV_STATUS_INVALID_PORT_ID             17
 -#define HV_STATUS_INVALID_CONNECTION_ID               18
 -#define HV_STATUS_INSUFFICIENT_BUFFERS                19
 -
 -/*
 - * The Hyper-V TimeRefCount register and the TSC
 - * page provide a guest VM clock with 100ns tick rate
 - */
 -#define HV_CLOCK_HZ (NSEC_PER_SEC/100)
 -
 -typedef struct _HV_REFERENCE_TSC_PAGE {
 -      __u32 tsc_sequence;
 -      __u32 res1;
 -      __u64 tsc_scale;
 -      __s64 tsc_offset;
 -}  __packed HV_REFERENCE_TSC_PAGE, *PHV_REFERENCE_TSC_PAGE;
 -
 -/* Define the number of synthetic interrupt sources. */
 -#define HV_SYNIC_SINT_COUNT           (16)
 -/* Define the expected SynIC version. */
 -#define HV_SYNIC_VERSION_1            (0x1)
 -/* Valid SynIC vectors are 16-255. */
 -#define HV_SYNIC_FIRST_VALID_VECTOR   (16)
 -
 -#define HV_SYNIC_CONTROL_ENABLE               (1ULL << 0)
 -#define HV_SYNIC_SIMP_ENABLE          (1ULL << 0)
 -#define HV_SYNIC_SIEFP_ENABLE         (1ULL << 0)
 -#define HV_SYNIC_SINT_MASKED          (1ULL << 16)
 -#define HV_SYNIC_SINT_AUTO_EOI                (1ULL << 17)
 -#define HV_SYNIC_SINT_VECTOR_MASK     (0xFF)
 -
 -#define HV_SYNIC_STIMER_COUNT         (4)
 -
 -/* Define synthetic interrupt controller message constants. */
 -#define HV_MESSAGE_SIZE                       (256)
 -#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
 -#define HV_MESSAGE_PAYLOAD_QWORD_COUNT        (30)
  
  /* Define hypervisor message types. */
  enum hv_message_type {
        HVMSG_GPA_INTERCEPT             = 0x80000001,
  
        /* Timer notification messages. */
 -      HVMSG_TIMER_EXPIRED                     = 0x80000010,
 +      HVMSG_TIMER_EXPIRED             = 0x80000010,
  
        /* Error messages. */
        HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020,
        HVMSG_UNRECOVERABLE_EXCEPTION   = 0x80000021,
 -      HVMSG_UNSUPPORTED_FEATURE               = 0x80000022,
 +      HVMSG_UNSUPPORTED_FEATURE       = 0x80000022,
  
        /* Trace buffer complete messages. */
        HVMSG_EVENTLOG_BUFFERCOMPLETE   = 0x80000040,
  
        /* Platform-specific processor intercept messages. */
 -      HVMSG_X64_IOPORT_INTERCEPT              = 0x80010000,
 +      HVMSG_X64_IOPORT_INTERCEPT      = 0x80010000,
        HVMSG_X64_MSR_INTERCEPT         = 0x80010001,
 -      HVMSG_X64_CPUID_INTERCEPT               = 0x80010002,
 +      HVMSG_X64_CPUID_INTERCEPT       = 0x80010002,
        HVMSG_X64_EXCEPTION_INTERCEPT   = 0x80010003,
 -      HVMSG_X64_APIC_EOI                      = 0x80010004,
 -      HVMSG_X64_LEGACY_FP_ERROR               = 0x80010005
 -};
 -
 -/* Define synthetic interrupt controller message flags. */
 -union hv_message_flags {
 -      __u8 asu8;
 -      struct {
 -              __u8 msg_pending:1;
 -              __u8 reserved:7;
 -      } __packed;
 -};
 -
 -/* Define port identifier type. */
 -union hv_port_id {
 -      __u32 asu32;
 -      struct {
 -              __u32 id:24;
 -              __u32 reserved:8;
 -      } __packed u;
 +      HVMSG_X64_APIC_EOI              = 0x80010004,
 +      HVMSG_X64_LEGACY_FP_ERROR       = 0x80010005
  };
  
 -/* Define synthetic interrupt controller message header. */
 -struct hv_message_header {
 -      __u32 message_type;
 -      __u8 payload_size;
 -      union hv_message_flags message_flags;
 -      __u8 reserved[2];
 -      union {
 -              __u64 sender;
 -              union hv_port_id port;
 -      };
 -} __packed;
 -
 -/* Define synthetic interrupt controller message format. */
 -struct hv_message {
 -      struct hv_message_header header;
 -      union {
 -              __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
 -      } u;
 -} __packed;
 -
 -/* Define the synthetic interrupt message page layout. */
 -struct hv_message_page {
 -      struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
 -} __packed;
 -
 -/* Define timer message payload structure. */
 -struct hv_timer_message_payload {
 -      __u32 timer_index;
 -      __u32 reserved;
 -      __u64 expiration_time;  /* When the timer expired */
 -      __u64 delivery_time;    /* When the message was delivered */
 -} __packed;
 -
  struct hv_nested_enlightenments_control {
        struct {
                __u32 directhypercall:1;
@@@ -543,11 -773,187 +545,11 @@@ struct hv_enlightened_vmcs 
  
  #define HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL                    0xFFFF
  
 -/* Define synthetic interrupt controller flag constants. */
 -#define HV_EVENT_FLAGS_COUNT          (256 * 8)
 -#define HV_EVENT_FLAGS_LONG_COUNT     (256 / sizeof(unsigned long))
 -
 -/*
 - * Synthetic timer configuration.
 - */
 -union hv_stimer_config {
 -      u64 as_uint64;
 -      struct {
 -              u64 enable:1;
 -              u64 periodic:1;
 -              u64 lazy:1;
 -              u64 auto_enable:1;
 -              u64 apic_vector:8;
 -              u64 direct_mode:1;
 -              u64 reserved_z0:3;
 -              u64 sintx:4;
 -              u64 reserved_z1:44;
 -      } __packed;
 -};
 -
 -
 -/* Define the synthetic interrupt controller event flags format. */
 -union hv_synic_event_flags {
 -      unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
 -};
 -
 -/* Define SynIC control register. */
 -union hv_synic_scontrol {
 -      u64 as_uint64;
 -      struct {
 -              u64 enable:1;
 -              u64 reserved:63;
 -      } __packed;
 -};
 -
 -/* Define synthetic interrupt source. */
 -union hv_synic_sint {
 -      u64 as_uint64;
 -      struct {
 -              u64 vector:8;
 -              u64 reserved1:8;
 -              u64 masked:1;
 -              u64 auto_eoi:1;
 -              u64 polling:1;
 -              u64 reserved2:45;
 -      } __packed;
 -};
 -
 -/* Define the format of the SIMP register */
 -union hv_synic_simp {
 -      u64 as_uint64;
 -      struct {
 -              u64 simp_enabled:1;
 -              u64 preserved:11;
 -              u64 base_simp_gpa:52;
 -      } __packed;
 -};
 -
 -/* Define the format of the SIEFP register */
 -union hv_synic_siefp {
 -      u64 as_uint64;
 -      struct {
 -              u64 siefp_enabled:1;
 -              u64 preserved:11;
 -              u64 base_siefp_gpa:52;
 -      } __packed;
 -};
 -
 -struct hv_vpset {
 -      u64 format;
 -      u64 valid_bank_mask;
 -      u64 bank_contents[];
 -} __packed;
 -
 -/* HvCallSendSyntheticClusterIpi hypercall */
 -struct hv_send_ipi {
 -      u32 vector;
 -      u32 reserved;
 -      u64 cpu_mask;
 -} __packed;
 -
 -/* HvCallSendSyntheticClusterIpiEx hypercall */
 -struct hv_send_ipi_ex {
 -      u32 vector;
 -      u32 reserved;
 -      struct hv_vpset vp_set;
 -} __packed;
 -
 -/* HvFlushGuestPhysicalAddressSpace hypercalls */
 -struct hv_guest_mapping_flush {
 -      u64 address_space;
 -      u64 flags;
 -} __packed;
 -
 -/*
 - *  HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
 - *  by the bitwidth of "additional_pages" in union hv_gpa_page_range.
 - */
 -#define HV_MAX_FLUSH_PAGES (2048)
 -
 -/* HvFlushGuestPhysicalAddressList hypercall */
 -union hv_gpa_page_range {
 -      u64 address_space;
 -      struct {
 -              u64 additional_pages:11;
 -              u64 largepage:1;
 -              u64 basepfn:52;
 -      } page;
 -};
 -
 -/*
 - * All input flush parameters should be in single page. The max flush
 - * count is equal with how many entries of union hv_gpa_page_range can
 - * be populated into the input parameter page.
 - */
 -#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) /        \
 -                              sizeof(union hv_gpa_page_range))
 -
 -struct hv_guest_mapping_flush_list {
 -      u64 address_space;
 -      u64 flags;
 -      union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
 -};
 -
 -/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
 -struct hv_tlb_flush {
 -      u64 address_space;
 -      u64 flags;
 -      u64 processor_mask;
 -      u64 gva_list[];
 -} __packed;
 -
 -/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
 -struct hv_tlb_flush_ex {
 -      u64 address_space;
 -      u64 flags;
 -      struct hv_vpset hv_vp_set;
 -      u64 gva_list[];
 -} __packed;
 -
  struct hv_partition_assist_pg {
        u32 tlb_lock_count;
  };
  
 -union hv_msi_entry {
 -      u64 as_uint64;
 -      struct {
 -              u32 address;
 -              u32 data;
 -      } __packed;
 -};
 -
 -struct hv_interrupt_entry {
 -      u32 source;                     /* 1 for MSI(-X) */
 -      u32 reserved1;
 -      union hv_msi_entry msi_entry;
 -} __packed;
  
 -/*
 - * flags for hv_device_interrupt_target.flags
 - */
 -#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST          1
 -#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET      2
 -
 -struct hv_device_interrupt_target {
 -      u32 vector;
 -      u32 flags;
 -      union {
 -              u64 vp_mask;
 -              struct hv_vpset vp_set;
 -      };
 -} __packed;
 +#include <asm-generic/hyperv-tlfs.h>
  
 -/* HvRetargetDeviceInterrupt hypercall */
 -struct hv_retarget_device_interrupt {
 -      u64 partition_id;               /* use "self" */
 -      u64 device_id;
 -      struct hv_interrupt_entry int_entry;
 -      u64 reserved2;
 -      struct hv_device_interrupt_target int_target;
 -} __packed __aligned(8);
  #endif
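
With the generic definitions moved to asm-generic/hyperv-tlfs.h, the architecture-neutral HV_MSR_*/HV_ACCESS_* names become the primary spelling and the HV_X64_* forms above are kept only as aliases. The Group A bits still sit in HYPERV_CPUID_FEATURES (leaf 0x40000003) EAX, as the removed comments note. A small userspace sketch that reads that leaf follows; the two bit positions are restated locally instead of including the kernel header, and the hypervisor-presence checks a real tool would do first are skipped.

#include <cpuid.h>
#include <stdio.h>

#define HYPERV_CPUID_FEATURES       0x40000003
#define HV_MSR_SYNIC_AVAILABLE      (1u << 2)	/* Group A, EAX bit 2 */
#define HV_MSR_HYPERCALL_AVAILABLE  (1u << 5)	/* Group A, EAX bit 5 */

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __cpuid() from <cpuid.h> does not range-check hypervisor leaves */
	__cpuid(HYPERV_CPUID_FEATURES, eax, ebx, ecx, edx);

	printf("SynIC MSRs:     %s\n", (eax & HV_MSR_SYNIC_AVAILABLE) ? "yes" : "no");
	printf("hypercall MSRs: %s\n", (eax & HV_MSR_HYPERCALL_AVAILABLE) ? "yes" : "no");
	return 0;
}
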
diff --combined arch/x86/include/asm/kvm_host.h
index 1c0b62d269629b021fba0244ab6bd36687e3e77a,58337a25396a4f540f56d58b9aa896d356e75d26..1da5858501ca314d3607f37d74295c4ca488ec76
  #define KVM_REQ_GET_VMCS12_PAGES      KVM_ARCH_REQ(24)
  #define KVM_REQ_APICV_UPDATE \
        KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+ #define KVM_REQ_TLB_FLUSH_CURRENT     KVM_ARCH_REQ(26)
+ #define KVM_REQ_HV_TLB_FLUSH \
+       KVM_ARCH_REQ_FLAGS(27, KVM_REQUEST_NO_WAKEUP)
+ #define KVM_REQ_APF_READY             KVM_ARCH_REQ(28)
  
  #define CR0_RESERVED_BITS                                               \
        (~(unsigned long)(X86_CR0_PE | X86_CR0_MP | X86_CR0_EM | X86_CR0_TS \
  #define UNMAPPED_GVA (~(gpa_t)0)
  
  /* KVM Hugepage definitions for x86 */
- enum {
-       PT_PAGE_TABLE_LEVEL   = 1,
-       PT_DIRECTORY_LEVEL    = 2,
-       PT_PDPE_LEVEL         = 3,
-       /* set max level to the biggest one */
-       PT_MAX_HUGEPAGE_LEVEL = PT_PDPE_LEVEL,
- };
- #define KVM_NR_PAGE_SIZES     (PT_MAX_HUGEPAGE_LEVEL - \
-                                PT_PAGE_TABLE_LEVEL + 1)
+ #define KVM_MAX_HUGEPAGE_LEVEL        PG_LEVEL_1G
+ #define KVM_NR_PAGE_SIZES     (KVM_MAX_HUGEPAGE_LEVEL - PG_LEVEL_4K + 1)
  #define KVM_HPAGE_GFN_SHIFT(x)        (((x) - 1) * 9)
  #define KVM_HPAGE_SHIFT(x)    (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
  #define KVM_HPAGE_SIZE(x)     (1UL << KVM_HPAGE_SHIFT(x))
  
  static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
  {
-       /* KVM_HPAGE_GFN_SHIFT(PT_PAGE_TABLE_LEVEL) must be 0. */
+       /* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
        return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
                (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
  }
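
The hugepage macros now count levels with the generic PG_LEVEL_* numbering (4K = 1, 2M = 2, 1G = 3) instead of the KVM-private PT_*_LEVEL enum; the arithmetic itself is unchanged. A standalone restatement, assuming PAGE_SHIFT is 12, is:

#include <stdio.h>

#define PAGE_SHIFT              12
#define PG_LEVEL_4K             1
#define KVM_MAX_HUGEPAGE_LEVEL  3			/* PG_LEVEL_1G */
#define KVM_HPAGE_GFN_SHIFT(x)  (((x) - 1) * 9)
#define KVM_HPAGE_SHIFT(x)      (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)       (1UL << KVM_HPAGE_SHIFT(x))

int main(void)
{
	int level;

	for (level = PG_LEVEL_4K; level <= KVM_MAX_HUGEPAGE_LEVEL; level++)
		printf("level %d: %lu KiB pages\n", level, KVM_HPAGE_SIZE(level) >> 10);
	return 0;	/* 4 KiB, 2048 KiB, 1048576 KiB */
}
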
@@@ -164,9 -161,13 +161,13 @@@ enum kvm_reg 
        NR_VCPU_REGS,
  
        VCPU_EXREG_PDPTR = NR_VCPU_REGS,
+       VCPU_EXREG_CR0,
        VCPU_EXREG_CR3,
+       VCPU_EXREG_CR4,
        VCPU_EXREG_RFLAGS,
        VCPU_EXREG_SEGMENTS,
+       VCPU_EXREG_EXIT_INFO_1,
+       VCPU_EXREG_EXIT_INFO_2,
  };
  
  enum {
  
  enum exit_fastpath_completion {
        EXIT_FASTPATH_NONE,
-       EXIT_FASTPATH_SKIP_EMUL_INS,
+       EXIT_FASTPATH_REENTER_GUEST,
+       EXIT_FASTPATH_EXIT_HANDLED,
  };
+ typedef enum exit_fastpath_completion fastpath_t;
  
  struct x86_emulate_ctxt;
  struct x86_exception;
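
The fastpath enum gains explicit REENTER_GUEST and EXIT_HANDLED outcomes plus a fastpath_t shorthand, and the vendor run() hook now returns it (see the kvm_x86_ops change below). A hedged fragment of how a caller is expected to act on the value; the function name and surrounding logic are illustrative, not the actual x86.c code:

static void vcpu_enter_guest_tail(struct kvm_vcpu *vcpu)	/* illustrative name */
{
	fastpath_t exit_fastpath = kvm_x86_ops.run(vcpu);

	if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
		/* e.g. a TSC-deadline MSR write fully handled with IRQs off:
		 * re-enter the guest without going through a full exit */
	} else if (exit_fastpath == EXIT_FASTPATH_EXIT_HANDLED) {
		/* work already done in the fast path; skip the usual handler */
	}
}
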
@@@ -372,12 -375,12 +375,12 @@@ struct rsvd_bits_validate 
  };
  
  struct kvm_mmu_root_info {
-       gpa_t cr3;
+       gpa_t pgd;
        hpa_t hpa;
  };
  
  #define KVM_MMU_ROOT_INFO_INVALID \
-       ((struct kvm_mmu_root_info) { .cr3 = INVALID_PAGE, .hpa = INVALID_PAGE })
+       ((struct kvm_mmu_root_info) { .pgd = INVALID_PAGE, .hpa = INVALID_PAGE })
  
  #define KVM_MMU_NUM_PREV_ROOTS 3
  
@@@ -403,7 -406,7 +406,7 @@@ struct kvm_mmu 
        void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                           u64 *spte, const void *pte);
        hpa_t root_hpa;
-       gpa_t root_cr3;
+       gpa_t root_pgd;
        union kvm_mmu_role mmu_role;
        u8 root_level;
        u8 shadow_root_level;
@@@ -598,6 -601,7 +601,7 @@@ struct kvm_vcpu_arch 
        u64 ia32_xss;
        u64 microcode_version;
        u64 arch_capabilities;
+       u64 perf_capabilities;
  
        /*
         * Paging state of the vcpu
  
        u64 xcr0;
        u64 guest_supported_xcr0;
-       u32 guest_xstate_size;
  
        struct kvm_pio_request pio;
        void *pio_data;
        struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
  
        int maxphyaddr;
+       int tdp_level;
  
        /* emulate context */
  
                struct gfn_to_pfn_cache cache;
        } st;
  
+       u64 l1_tsc_offset;
        u64 tsc_offset;
        u64 last_guest_tsc;
        u64 last_host_tsc;
  
        struct {
                bool halted;
-               gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
+               gfn_t gfns[ASYNC_PF_PER_VCPU];
                struct gfn_to_hva_cache data;
-               u64 msr_val;
+               u64 msr_en_val; /* MSR_KVM_ASYNC_PF_EN */
+               u64 msr_int_val; /* MSR_KVM_ASYNC_PF_INT */
+               u16 vec;
                u32 id;
                bool send_user_only;
-               u32 host_apf_reason;
+               u32 host_apf_flags;
                unsigned long nested_apf_token;
                bool delivery_as_pf_vmexit;
+               bool pageready_pending;
        } apf;
  
        /* OSVW MSRs (AMD only) */
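
The apf block now tracks two MSRs, MSR_KVM_ASYNC_PF_EN and MSR_KVM_ASYNC_PF_INT, plus the vector used for interrupt-based "page ready" delivery. As a hedged sketch only (the guest-side patches are queued separately per the merge description, and the flag name below is an assumption based on these fields), a guest opting into the new delivery mode would do roughly:

#include <asm/kvm_para.h>
#include <asm/msr.h>

/* hedged guest-side sketch; KVM_ASYNC_PF_DELIVERY_AS_INT and the vector
 * choice are assumptions, not the merged guest implementation */
static void enable_async_pf_int(u64 reason_area_pa, u8 vector)
{
	/* tell KVM which vector to raise for "page ready" notifications */
	wrmsrl(MSR_KVM_ASYNC_PF_INT, vector);

	/* enable async PF and request interrupt-based "page ready" delivery */
	wrmsrl(MSR_KVM_ASYNC_PF_EN, reason_area_pa |
				    KVM_ASYNC_PF_ENABLED |
				    KVM_ASYNC_PF_DELIVERY_AS_INT);
}
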
@@@ -855,6 -863,18 +863,18 @@@ struct kvm_apic_map 
        struct kvm_lapic *phys_map[];
  };
  
+ /* Hyper-V synthetic debugger (SynDbg)*/
+ struct kvm_hv_syndbg {
+       struct {
+               u64 control;
+               u64 status;
+               u64 send_page;
+               u64 recv_page;
+               u64 pending_page;
+       } control;
+       u64 options;
+ };
  /* Hyper-V emulation context */
  struct kvm_hv {
        struct mutex hv_lock;
        u64 hv_crash_param[HV_X64_MSR_CRASH_PARAMS];
        u64 hv_crash_ctl;
  
 -      HV_REFERENCE_TSC_PAGE tsc_ref;
 +      struct ms_hyperv_tsc_page tsc_ref;
  
        struct idr conn_to_evt;
  
        atomic_t num_mismatched_vp_indexes;
  
        struct hv_partition_assist_pg *hv_pa_pg;
+       struct kvm_hv_syndbg hv_syndbg;
  };
  
  enum kvm_irqchip_mode {
@@@ -1028,6 -1049,8 +1049,8 @@@ struct kvm_vcpu_stat 
        u64 irq_injections;
        u64 nmi_injections;
        u64 req_event;
+       u64 halt_poll_success_ns;
+       u64 halt_poll_fail_ns;
  };
  
  struct x86_instruction_info;
@@@ -1059,7 -1082,7 +1082,7 @@@ struct kvm_x86_ops 
        void (*hardware_disable)(void);
        void (*hardware_unsetup)(void);
        bool (*cpu_has_accelerated_tpr)(void);
-       bool (*has_emulated_msr)(int index);
+       bool (*has_emulated_msr)(u32 index);
        void (*cpuid_update)(struct kvm_vcpu *vcpu);
  
        unsigned int vm_size;
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
-       void (*decache_cr0_guest_bits)(struct kvm_vcpu *vcpu);
-       void (*decache_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        int (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
  
-       void (*tlb_flush)(struct kvm_vcpu *vcpu, bool invalidate_gpa);
+       void (*tlb_flush_all)(struct kvm_vcpu *vcpu);
+       void (*tlb_flush_current)(struct kvm_vcpu *vcpu);
        int  (*tlb_remote_flush)(struct kvm *kvm);
        int  (*tlb_remote_flush_with_range)(struct kvm *kvm,
                        struct kvm_tlb_range *range);
         */
        void (*tlb_flush_gva)(struct kvm_vcpu *vcpu, gva_t addr);
  
-       void (*run)(struct kvm_vcpu *vcpu);
+       /*
+        * Flush any TLB entries created by the guest.  Like tlb_flush_gva(),
+        * does not need to flush GPA->HPA mappings.
+        */
+       void (*tlb_flush_guest)(struct kvm_vcpu *vcpu);
+       enum exit_fastpath_completion (*run)(struct kvm_vcpu *vcpu);
        int (*handle_exit)(struct kvm_vcpu *vcpu,
                enum exit_fastpath_completion exit_fastpath);
        int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_nmi)(struct kvm_vcpu *vcpu);
        void (*queue_exception)(struct kvm_vcpu *vcpu);
        void (*cancel_injection)(struct kvm_vcpu *vcpu);
-       int (*interrupt_allowed)(struct kvm_vcpu *vcpu);
-       int (*nmi_allowed)(struct kvm_vcpu *vcpu);
+       int (*interrupt_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
+       int (*nmi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
        bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
        void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
        void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
        bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu);
        void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
        void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
-       void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
+       void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu);
        int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
        int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
        int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
  
        bool (*has_wbinvd_exit)(void);
  
-       u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
        /* Returns actual tsc_offset set in active VMCS */
        u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
  
                               struct x86_instruction_info *info,
                               enum x86_intercept_stage stage,
                               struct x86_exception *exception);
-       void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu,
-               enum exit_fastpath_completion *exit_fastpath);
+       void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
  
-       int (*check_nested_events)(struct kvm_vcpu *vcpu);
        void (*request_immediate_exit)(struct kvm_vcpu *vcpu);
  
        void (*sched_in)(struct kvm_vcpu *kvm, int cpu);
  
        /* pmu operations of sub-arch */
        const struct kvm_pmu_ops *pmu_ops;
+       const struct kvm_x86_nested_ops *nested_ops;
  
        /*
         * Architecture specific hooks for vCPU blocking due to
  
        void (*setup_mce)(struct kvm_vcpu *vcpu);
  
-       int (*get_nested_state)(struct kvm_vcpu *vcpu,
-                               struct kvm_nested_state __user *user_kvm_nested_state,
-                               unsigned user_data_size);
-       int (*set_nested_state)(struct kvm_vcpu *vcpu,
-                               struct kvm_nested_state __user *user_kvm_nested_state,
-                               struct kvm_nested_state *kvm_state);
-       bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
-       int (*smi_allowed)(struct kvm_vcpu *vcpu);
+       int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
        int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
        int (*pre_leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
-       int (*enable_smi_window)(struct kvm_vcpu *vcpu);
+       void (*enable_smi_window)(struct kvm_vcpu *vcpu);
  
        int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
        int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp);
  
        int (*get_msr_feature)(struct kvm_msr_entry *entry);
  
-       int (*nested_enable_evmcs)(struct kvm_vcpu *vcpu,
-                                  uint16_t *vmcs_version);
-       uint16_t (*nested_get_evmcs_version)(struct kvm_vcpu *vcpu);
        bool (*need_emulation_on_page_fault)(struct kvm_vcpu *vcpu);
  
        bool (*apic_init_signal_blocked)(struct kvm_vcpu *vcpu);
        int (*enable_direct_tlbflush)(struct kvm_vcpu *vcpu);
+       void (*migrate_timers)(struct kvm_vcpu *vcpu);
+ };
+ struct kvm_x86_nested_ops {
+       int (*check_events)(struct kvm_vcpu *vcpu);
+       bool (*hv_timer_pending)(struct kvm_vcpu *vcpu);
+       int (*get_state)(struct kvm_vcpu *vcpu,
+                        struct kvm_nested_state __user *user_kvm_nested_state,
+                        unsigned user_data_size);
+       int (*set_state)(struct kvm_vcpu *vcpu,
+                        struct kvm_nested_state __user *user_kvm_nested_state,
+                        struct kvm_nested_state *kvm_state);
+       bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+       int (*enable_evmcs)(struct kvm_vcpu *vcpu,
+                           uint16_t *vmcs_version);
+       uint16_t (*get_evmcs_version)(struct kvm_vcpu *vcpu);
  };
  
  struct kvm_x86_init_ops {
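
Nested-virtualization callbacks move out of kvm_x86_ops into the new kvm_x86_nested_ops table reached through the nested_ops pointer; the hyperv.c hunk further down already switches to kvm_x86_ops.nested_ops->get_evmcs_version. A vendor module would publish the table roughly as below; this is shape only, and every callback name is a placeholder, not the actual VMX or SVM symbol.

/* illustrative wiring only; the callbacks are undefined placeholders */
static struct kvm_x86_nested_ops my_nested_ops = {
	.check_events      = my_check_nested_events,
	.hv_timer_pending  = my_nested_hv_timer_pending,
	.get_state         = my_get_nested_state,
	.set_state         = my_set_nested_state,
	.get_vmcs12_pages  = my_get_vmcs12_pages,
	.enable_evmcs      = my_enable_evmcs,
	.get_evmcs_version = my_get_evmcs_version,
};

/* later, during vendor hardware setup (sketch): */
/* kvm_x86_ops.nested_ops = &my_nested_ops; */
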
@@@ -1279,7 -1311,8 +1311,7 @@@ extern struct kmem_cache *x86_fpu_cache
  #define __KVM_HAVE_ARCH_VM_ALLOC
  static inline struct kvm *kvm_arch_alloc_vm(void)
  {
 -      return __vmalloc(kvm_x86_ops.vm_size,
 -                       GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
 +      return __vmalloc(kvm_x86_ops.vm_size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
  }
  void kvm_arch_free_vm(struct kvm *kvm);
  
@@@ -1451,6 -1484,8 +1483,8 @@@ void kvm_queue_exception_p(struct kvm_v
  void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
  void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
  void kvm_inject_page_fault(struct kvm_vcpu *vcpu, struct x86_exception *fault);
+ bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
+                                   struct x86_exception *fault);
  int kvm_read_guest_page_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                            gfn_t gfn, void *data, int offset, int len,
                            u32 access);
@@@ -1478,6 -1513,8 +1512,8 @@@ void kvm_pic_clear_all(struct kvm_pic *
  
  void kvm_inject_nmi(struct kvm_vcpu *vcpu);
  
+ void kvm_update_dr7(struct kvm_vcpu *vcpu);
  int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
  int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
  void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
@@@ -1508,8 -1545,11 +1544,11 @@@ int kvm_emulate_hypercall(struct kvm_vc
  int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
                       void *insn, int insn_len);
  void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
+ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                           gva_t gva, hpa_t root_hpa);
  void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
- void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
+ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
+                    bool skip_mmu_sync);
  
  void kvm_configure_mmu(bool enable_tdp, int tdp_page_level);
  
@@@ -1573,8 -1613,6 +1612,6 @@@ enum 
  };
  
  #define HF_GIF_MASK           (1 << 0)
- #define HF_HIF_MASK           (1 << 1)
- #define HF_VINTR_MASK         (1 << 2)
  #define HF_NMI_MASK           (1 << 3)
  #define HF_IRET_MASK          (1 << 4)
  #define HF_GUEST_MASK         (1 << 5) /* VCPU is in guest-mode */
@@@ -1640,7 -1678,8 +1677,8 @@@ void kvm_arch_async_page_present(struc
                                 struct kvm_async_pf *work);
  void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work);
- bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu);
+ void kvm_arch_async_page_present_queued(struct kvm_vcpu *vcpu);
+ bool kvm_arch_can_dequeue_async_page_present(struct kvm_vcpu *vcpu);
  extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
  
  int kvm_skip_emulated_instruction(struct kvm_vcpu *vcpu);
diff --combined arch/x86/kernel/traps.c
index 428186d9de46c626c7034bf6344152c25e4f81b7,821fac47eef6fc983871bb7bb21c2e575b3f4ee5..4cc541051994d53b63de16dbfcf4bee22bd47ebc
  #include <linux/mm.h>
  #include <linux/smp.h>
  #include <linux/io.h>
 +#include <linux/hardirq.h>
 +#include <linux/atomic.h>
 +
  #include <asm/stacktrace.h>
  #include <asm/processor.h>
  #include <asm/debugreg.h>
 -#include <linux/atomic.h>
  #include <asm/text-patching.h>
  #include <asm/ftrace.h>
  #include <asm/traps.h>
@@@ -84,6 -82,78 +84,6 @@@ static inline void cond_local_irq_disab
                local_irq_disable();
  }
  
 -/*
 - * In IST context, we explicitly disable preemption.  This serves two
 - * purposes: it makes it much less likely that we would accidentally
 - * schedule in IST context and it will force a warning if we somehow
 - * manage to schedule by accident.
 - */
 -void ist_enter(struct pt_regs *regs)
 -{
 -      if (user_mode(regs)) {
 -              RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 -      } else {
 -              /*
 -               * We might have interrupted pretty much anything.  In
 -               * fact, if we're a machine check, we can even interrupt
 -               * NMI processing.  We don't want in_nmi() to return true,
 -               * but we need to notify RCU.
 -               */
 -              rcu_nmi_enter();
 -      }
 -
 -      preempt_disable();
 -
 -      /* This code is a bit fragile.  Test it. */
 -      RCU_LOCKDEP_WARN(!rcu_is_watching(), "ist_enter didn't work");
 -}
 -NOKPROBE_SYMBOL(ist_enter);
 -
 -void ist_exit(struct pt_regs *regs)
 -{
 -      preempt_enable_no_resched();
 -
 -      if (!user_mode(regs))
 -              rcu_nmi_exit();
 -}
 -
 -/**
 - * ist_begin_non_atomic() - begin a non-atomic section in an IST exception
 - * @regs:     regs passed to the IST exception handler
 - *
 - * IST exception handlers normally cannot schedule.  As a special
 - * exception, if the exception interrupted userspace code (i.e.
 - * user_mode(regs) would return true) and the exception was not
 - * a double fault, it can be safe to schedule.  ist_begin_non_atomic()
 - * begins a non-atomic section within an ist_enter()/ist_exit() region.
 - * Callers are responsible for enabling interrupts themselves inside
 - * the non-atomic section, and callers must call ist_end_non_atomic()
 - * before ist_exit().
 - */
 -void ist_begin_non_atomic(struct pt_regs *regs)
 -{
 -      BUG_ON(!user_mode(regs));
 -
 -      /*
 -       * Sanity check: we need to be on the normal thread stack.  This
 -       * will catch asm bugs and any attempt to use ist_preempt_enable
 -       * from double_fault.
 -       */
 -      BUG_ON(!on_thread_stack());
 -
 -      preempt_enable_no_resched();
 -}
 -
 -/**
 - * ist_end_non_atomic() - begin a non-atomic section in an IST exception
 - *
 - * Ends a non-atomic section started with ist_begin_non_atomic().
 - */
 -void ist_end_non_atomic(void)
 -{
 -      preempt_disable();
 -}
 -
  int is_valid_bugaddr(unsigned long addr)
  {
        unsigned short ud;
@@@ -256,6 -326,7 +256,6 @@@ __visible void __noreturn handle_stack_
  }
  #endif
  
 -#if defined(CONFIG_X86_64) || defined(CONFIG_DOUBLEFAULT)
  /*
   * Runs on an IST stack for x86_64 and on a special task stack for x86_32.
   *
@@@ -292,7 -363,7 +292,7 @@@ dotraplinkage void do_double_fault(stru
         * The net result is that our #GP handler will think that we
         * entered from usermode with the bad user context.
         *
 -       * No need for ist_enter here because we don't use RCU.
 +       * No need for nmi_enter() here because we don't use RCU.
         */
        if (((long)regs->sp >> P4D_SHIFT) == ESPFIX_PGD_ENTRY &&
                regs->cs == __KERNEL_CS &&
        }
  #endif
  
 -      ist_enter(regs);
 +      nmi_enter();
        notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
  
        tsk->thread.error_code = error_code;
        die("double fault", regs, error_code);
        panic("Machine halted.");
  }
 -#endif
  
  dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
  {
@@@ -520,13 -592,19 +520,13 @@@ dotraplinkage void notrace do_int3(stru
                return;
  
        /*
 -       * Unlike any other non-IST entry, we can be called from a kprobe in
 -       * non-CONTEXT_KERNEL kernel mode or even during context tracking
 -       * state changes.  Make sure that we wake up RCU even if we're coming
 -       * from kernel code.
 -       *
 -       * This means that we can't schedule even if we came from a
 -       * preemptible kernel context.  That's okay.
 +       * Unlike any other non-IST entry, we can be called from pretty much
 +       * any location in the kernel through kprobes -- text_poke() will most
 +       * likely be handled by poke_int3_handler() above. This means this
 +       * handler is effectively NMI-like.
         */
 -      if (!user_mode(regs)) {
 -              rcu_nmi_enter();
 -              preempt_disable();
 -      }
 -      RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 +      if (!user_mode(regs))
 +              nmi_enter();
  
  #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
        cond_local_irq_disable(regs);
  
  exit:
 -      if (!user_mode(regs)) {
 -              preempt_enable_no_resched();
 -              rcu_nmi_exit();
 -      }
 +      if (!user_mode(regs))
 +              nmi_exit();
  }
  NOKPROBE_SYMBOL(do_int3);
  
@@@ -653,7 -733,7 +653,7 @@@ dotraplinkage void do_debug(struct pt_r
        unsigned long dr6;
        int si_code;
  
 -      ist_enter(regs);
 +      nmi_enter();
  
        get_debugreg(dr6, 6);
        /*
        debug_stack_usage_dec();
  
  exit:
 -      ist_exit(regs);
 +      nmi_exit();
  }
  NOKPROBE_SYMBOL(do_debug);
  
@@@ -903,7 -983,5 +903,5 @@@ void __init trap_init(void
  
        idt_setup_ist_traps();
  
-       x86_init.irqs.trap_init();
        idt_setup_debugidt_traps();
  }
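
Throughout this file the bespoke ist_enter()/ist_exit() pair (and the non-atomic section helpers removed above) is replaced by the common nmi_enter()/nmi_exit() bookkeeping, applied only when the exception did not come from user mode. The resulting handler pattern, with a placeholder name standing in for do_int3() and friends, reduces to:

dotraplinkage void do_example_trap(struct pt_regs *regs, long error_code)
{
	/* NMI-like accounting is only needed when we interrupted the kernel */
	if (!user_mode(regs))
		nmi_enter();

	/* ... handle the exception ... */

	if (!user_mode(regs))
		nmi_exit();
}
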
diff --combined arch/x86/kvm/hyperv.c
index 6bc6d7613f76d4b5042febd5d2da4ca3ec54a3ee,4e1695db788a7a3ac650368990b3e3bec9ac74ab..238b78e069fe24365339537f94067eed000f8f1e
@@@ -21,6 -21,7 +21,7 @@@
  #include "x86.h"
  #include "lapic.h"
  #include "ioapic.h"
+ #include "cpuid.h"
  #include "hyperv.h"
  
  #include <linux/cpu.h>
@@@ -266,6 -267,123 +267,123 @@@ static int synic_set_msr(struct kvm_vcp
        return ret;
  }
  
+ static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
+ {
+       struct kvm_cpuid_entry2 *entry;
+       entry = kvm_find_cpuid_entry(vcpu,
+                                    HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES,
+                                    0);
+       if (!entry)
+               return false;
+       return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+ }
+
+ static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
+ {
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_hv *hv = &kvm->arch.hyperv;
+       if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
+               hv->hv_syndbg.control.status =
+                       vcpu->run->hyperv.u.syndbg.status;
+       return 1;
+ }
+
+ static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
+ {
+       struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
+       struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv;
+       hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
+       hv_vcpu->exit.u.syndbg.msr = msr;
+       hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
+       hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
+       hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
+       hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
+       vcpu->arch.complete_userspace_io =
+                       kvm_hv_syndbg_complete_userspace;
+       kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
+ }
+
+ static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
+ {
+       struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
+       if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
+               return 1;
+       trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
+                                   vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data);
+       switch (msr) {
+       case HV_X64_MSR_SYNDBG_CONTROL:
+               syndbg->control.control = data;
+               if (!host)
+                       syndbg_exit(vcpu, msr);
+               break;
+       case HV_X64_MSR_SYNDBG_STATUS:
+               syndbg->control.status = data;
+               break;
+       case HV_X64_MSR_SYNDBG_SEND_BUFFER:
+               syndbg->control.send_page = data;
+               break;
+       case HV_X64_MSR_SYNDBG_RECV_BUFFER:
+               syndbg->control.recv_page = data;
+               break;
+       case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
+               syndbg->control.pending_page = data;
+               if (!host)
+                       syndbg_exit(vcpu, msr);
+               break;
+       case HV_X64_MSR_SYNDBG_OPTIONS:
+               syndbg->options = data;
+               break;
+       default:
+               break;
+       }
+       return 0;
+ }
+
+ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
+ {
+       struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
+       if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
+               return 1;
+       switch (msr) {
+       case HV_X64_MSR_SYNDBG_CONTROL:
+               *pdata = syndbg->control.control;
+               break;
+       case HV_X64_MSR_SYNDBG_STATUS:
+               *pdata = syndbg->control.status;
+               break;
+       case HV_X64_MSR_SYNDBG_SEND_BUFFER:
+               *pdata = syndbg->control.send_page;
+               break;
+       case HV_X64_MSR_SYNDBG_RECV_BUFFER:
+               *pdata = syndbg->control.recv_page;
+               break;
+       case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
+               *pdata = syndbg->control.pending_page;
+               break;
+       case HV_X64_MSR_SYNDBG_OPTIONS:
+               *pdata = syndbg->options;
+               break;
+       default:
+               break;
+       }
+       trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id,
+                                   vcpu_to_hv_vcpu(vcpu)->vp_index, msr,
+                                   *pdata);
+       return 0;
+ }
+
  static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
                         bool host)
  {
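
When a SynDbg MSR write needs host attention, syndbg_exit() above fills vcpu->run->hyperv and exits to userspace as KVM_EXIT_HYPERV with type KVM_EXIT_HYPERV_SYNDBG; kvm_hv_syndbg_complete_userspace() later copies back whatever status userspace reported. A hedged sketch of the userspace half of that handshake, assuming the uapi mirrors the kvm_run fields used above and with the debugger transport reduced to a hypothetical helper:

#include <linux/kvm.h>

/* hypothetical transport hook, not part of any real API */
void process_debug_pages(__u64 send_gpa, __u64 recv_gpa, __u64 pending_gpa);

static void handle_hyperv_exit(struct kvm_run *run)
{
	if (run->exit_reason != KVM_EXIT_HYPERV ||
	    run->hyperv.type != KVM_EXIT_HYPERV_SYNDBG)
		return;

	/* hand the send/recv/pending page GPAs to the debugger transport ... */
	process_debug_pages(run->hyperv.u.syndbg.send_page,
			    run->hyperv.u.syndbg.recv_page,
			    run->hyperv.u.syndbg.pending_page);

	/* ... then report completion; the kernel copies this back into
	 * hv_syndbg.control.status in kvm_hv_syndbg_complete_userspace() */
	run->hyperv.u.syndbg.status = 0;	/* success */
}
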
@@@ -800,6 -918,8 +918,8 @@@ static bool kvm_hv_msr_partition_wide(u
        case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
        case HV_X64_MSR_TSC_EMULATION_CONTROL:
        case HV_X64_MSR_TSC_EMULATION_STATUS:
+       case HV_X64_MSR_SYNDBG_OPTIONS:
+       case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
                r = true;
                break;
        }
@@@ -900,7 -1020,7 +1020,7 @@@ static int kvm_hv_msr_set_crash_data(st
   * These two equivalencies are implemented in this function.
   */
  static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
 -                                      HV_REFERENCE_TSC_PAGE *tsc_ref)
 +                                      struct ms_hyperv_tsc_page *tsc_ref)
  {
        u64 max_mul;
  
@@@ -941,7 -1061,7 +1061,7 @@@ void kvm_hv_setup_tsc_page(struct kvm *
        u64 gfn;
  
        BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
 -      BUILD_BUG_ON(offsetof(HV_REFERENCE_TSC_PAGE, tsc_sequence) != 0);
 +      BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);
  
        if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                return;
@@@ -1061,6 -1181,9 +1181,9 @@@ static int kvm_hv_set_msr_pw(struct kvm
                if (!host)
                        return 1;
                break;
+       case HV_X64_MSR_SYNDBG_OPTIONS:
+       case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
+               return syndbg_set_msr(vcpu, msr, data, host);
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n",
                            msr, data);
@@@ -1190,7 -1313,8 +1313,8 @@@ static int kvm_hv_set_msr(struct kvm_vc
        return 0;
  }
  
- static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+ static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
+                            bool host)
  {
        u64 data = 0;
        struct kvm *kvm = vcpu->kvm;
        case HV_X64_MSR_TSC_EMULATION_STATUS:
                data = hv->hv_tsc_emulation_status;
                break;
+       case HV_X64_MSR_SYNDBG_OPTIONS:
+       case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
+               return syndbg_get_msr(vcpu, msr, pdata, host);
        default:
                vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
                return 1;
@@@ -1316,7 -1443,7 +1443,7 @@@ int kvm_hv_get_msr_common(struct kvm_vc
                int r;
  
                mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock);
-               r = kvm_hv_get_msr_pw(vcpu, msr, pdata);
+               r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
                mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock);
                return r;
        } else
@@@ -1425,8 -1552,7 +1552,7 @@@ static u64 kvm_hv_flush_tlb(struct kvm_
         * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't
         * analyze it here, flush TLB regardless of the specified address space.
         */
-       kvm_make_vcpus_request_mask(kvm,
-                                   KVM_REQ_TLB_FLUSH | KVM_REQUEST_NO_WAKEUP,
+       kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH,
                                    NULL, vcpu_mask, &hv_vcpu->tlb_flush);
  
  ret_success:
@@@ -1530,7 -1656,7 +1656,7 @@@ ret_success
  
  bool kvm_hv_hypercall_enabled(struct kvm *kvm)
  {
-       return READ_ONCE(kvm->arch.hyperv.hv_hypercall) & HV_X64_MSR_HYPERCALL_ENABLE;
+       return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0;
  }
  
  static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
@@@ -1709,6 -1835,34 +1835,34 @@@ int kvm_hv_hypercall(struct kvm_vcpu *v
                }
                ret = kvm_hv_send_ipi(vcpu, ingpa, outgpa, true, false);
                break;
+       case HVCALL_POST_DEBUG_DATA:
+       case HVCALL_RETRIEVE_DEBUG_DATA:
+               if (unlikely(fast)) {
+                       ret = HV_STATUS_INVALID_PARAMETER;
+                       break;
+               }
+               fallthrough;
+       case HVCALL_RESET_DEBUG_SESSION: {
+               struct kvm_hv_syndbg *syndbg = vcpu_to_hv_syndbg(vcpu);
+               if (!kvm_hv_is_syndbg_enabled(vcpu)) {
+                       ret = HV_STATUS_INVALID_HYPERCALL_CODE;
+                       break;
+               }
+               if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
+                       ret = HV_STATUS_OPERATION_DENIED;
+                       break;
+               }
+               vcpu->run->exit_reason = KVM_EXIT_HYPERV;
+               vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
+               vcpu->run->hyperv.u.hcall.input = param;
+               vcpu->run->hyperv.u.hcall.params[0] = ingpa;
+               vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+               vcpu->arch.complete_userspace_io =
+                               kvm_hv_hypercall_complete_userspace;
+               return 0;
+       }
        default:
                ret = HV_STATUS_INVALID_HYPERCALL_CODE;
                break;
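
KVM does not service the debug-data hypercalls itself: the code above records the call in the run structure, exits to userspace with KVM_EXIT_HYPERV / KVM_EXIT_HYPERV_HCALL, and completes the hypercall with whatever result the VMM writes back. A rough sketch of the userspace side (field names as introduced by this series; handle_debug_data() is a hypothetical VMM helper, not a KVM API):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical VMM transport for the synthetic debugger. */
extern __u64 handle_debug_data(__u64 code, __u64 ingpa, __u64 outgpa);

static void vcpu_run_once(int vcpu_fd, struct kvm_run *run)
{
	ioctl(vcpu_fd, KVM_RUN, 0);

	if (run->exit_reason == KVM_EXIT_HYPERV &&
	    run->hyperv.type == KVM_EXIT_HYPERV_HCALL) {
		__u64 code   = run->hyperv.u.hcall.input & 0xffff; /* call code */
		__u64 ingpa  = run->hyperv.u.hcall.params[0];
		__u64 outgpa = run->hyperv.u.hcall.params[1];

		/* Move debugger data to/from guest memory, then report status;
		 * the next KVM_RUN completes the hypercall with this result. */
		run->hyperv.u.hcall.result = handle_debug_data(code, ingpa, outgpa);
	}
}
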
@@@ -1796,12 -1950,15 +1950,15 @@@ int kvm_vcpu_ioctl_get_hv_cpuid(struct 
                { .function = HYPERV_CPUID_FEATURES },
                { .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
                { .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
+               { .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
+               { .function = HYPERV_CPUID_SYNDBG_INTERFACE },
+               { .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
                { .function = HYPERV_CPUID_NESTED_FEATURES },
        };
        int i, nent = ARRAY_SIZE(cpuid_entries);
  
-       if (kvm_x86_ops.nested_get_evmcs_version)
-               evmcs_ver = kvm_x86_ops.nested_get_evmcs_version(vcpu);
+       if (kvm_x86_ops.nested_ops->get_evmcs_version)
+               evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);
  
        /* Skip NESTED_FEATURES if eVMCS is not supported */
        if (!evmcs_ver)
                case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
                        memcpy(signature, "Linux KVM Hv", 12);
  
-                       ent->eax = HYPERV_CPUID_NESTED_FEATURES;
+                       ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
                        ent->ebx = signature[0];
                        ent->ecx = signature[1];
                        ent->edx = signature[2];
                        ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
                        ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
  
 -                      ent->ebx |= HV_X64_DEBUGGING;
++                      ent->ebx |= HV_DEBUGGING;
+                       ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
+                       ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
                        /*
                         * Direct Synthetic timers only make sense with in-kernel
                         * LAPIC
  
                        break;
  
+               case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
+                       memcpy(signature, "Linux KVM Hv", 12);
+                       ent->eax = 0;
+                       ent->ebx = signature[0];
+                       ent->ecx = signature[1];
+                       ent->edx = signature[2];
+                       break;
+               case HYPERV_CPUID_SYNDBG_INTERFACE:
+                       memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
+                       ent->eax = signature[0];
+                       break;
+               case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
+                       ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
+                       break;
                default:
                        break;
                }
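
The SYNDBG vendor and interface leaves added above hand 12-byte ASCII signatures back to the guest packed into the 32-bit CPUID registers, the same trick used for the existing "Linux KVM Hv" vendor leaf. A standalone illustration of the packing on a little-endian host:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint32_t signature[3];	/* scratch buffer, as in the function above */
	uint32_t ebx, ecx, edx;

	/* 12 bytes, no NUL, copied straight into the registers of the leaf */
	memcpy(signature, "Linux KVM Hv", 12);
	ebx = signature[0];	/* "Linu" */
	ecx = signature[1];	/* "x KV" */
	edx = signature[2];	/* "M Hv" */

	printf("ebx=%08x ecx=%08x edx=%08x\n", ebx, ecx, edx);
	return 0;
}
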
diff --combined arch/x86/kvm/mmu/mmu.c
index fd59fee846315d10f6208bcd46f14115cf0fcadd,5de1929cfc5531f65302ab45de129fe2833c7292..fdd05c233308a7236e7f1b19b1116ef6095fef84
@@@ -16,6 -16,7 +16,7 @@@
   */
  
  #include "irq.h"
+ #include "ioapic.h"
  #include "mmu.h"
  #include "x86.h"
  #include "kvm_cache_regs.h"
@@@ -78,6 -79,9 +79,9 @@@ module_param_cb(nx_huge_pages_recovery_
                &nx_huge_pages_recovery_ratio, 0644);
  __MODULE_PARM_TYPE(nx_huge_pages_recovery_ratio, "uint");
  
+ static bool __read_mostly force_flush_and_sync_on_reuse;
+ module_param_named(flush_on_reuse, force_flush_and_sync_on_reuse, bool, 0644);
+
  /*
   * When setting this variable to true it enables Two-Dimensional-Paging
   * where the hardware walks 2 page tables:
@@@ -244,7 -248,6 +248,6 @@@ static u64 __read_mostly shadow_x_mask
  static u64 __read_mostly shadow_user_mask;
  static u64 __read_mostly shadow_accessed_mask;
  static u64 __read_mostly shadow_dirty_mask;
- static u64 __read_mostly shadow_mmio_mask;
  static u64 __read_mostly shadow_mmio_value;
  static u64 __read_mostly shadow_mmio_access_mask;
  static u64 __read_mostly shadow_present_mask;
@@@ -331,19 -334,19 +334,19 @@@ static void kvm_flush_remote_tlbs_with_
        kvm_flush_remote_tlbs_with_range(kvm, &range);
  }
  
- void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask, u64 mmio_value, u64 access_mask)
+ void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask)
  {
        BUG_ON((u64)(unsigned)access_mask != access_mask);
-       BUG_ON((mmio_mask & mmio_value) != mmio_value);
+       WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask << shadow_nonpresent_or_rsvd_mask_len));
+       WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);
        shadow_mmio_value = mmio_value | SPTE_MMIO_MASK;
-       shadow_mmio_mask = mmio_mask | SPTE_SPECIAL_MASK;
        shadow_mmio_access_mask = access_mask;
  }
  EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);
  
  static bool is_mmio_spte(u64 spte)
  {
-       return (spte & shadow_mmio_mask) == shadow_mmio_value;
+       return (spte & SPTE_SPECIAL_MASK) == SPTE_MMIO_MASK;
  }
  
  static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
@@@ -566,7 -569,6 +569,6 @@@ static void kvm_mmu_reset_all_pte_masks
        shadow_dirty_mask = 0;
        shadow_nx_mask = 0;
        shadow_x_mask = 0;
-       shadow_mmio_mask = 0;
        shadow_present_mask = 0;
        shadow_acc_track_mask = 0;
  
         * the most significant bits of legal physical address space.
         */
        shadow_nonpresent_or_rsvd_mask = 0;
-       low_phys_bits = boot_cpu_data.x86_cache_bits;
-       if (boot_cpu_data.x86_cache_bits <
-           52 - shadow_nonpresent_or_rsvd_mask_len) {
+       low_phys_bits = boot_cpu_data.x86_phys_bits;
+       if (boot_cpu_has_bug(X86_BUG_L1TF) &&
+           !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
+                         52 - shadow_nonpresent_or_rsvd_mask_len)) {
+               low_phys_bits = boot_cpu_data.x86_cache_bits
+                       - shadow_nonpresent_or_rsvd_mask_len;
                shadow_nonpresent_or_rsvd_mask =
-                       rsvd_bits(boot_cpu_data.x86_cache_bits -
-                                 shadow_nonpresent_or_rsvd_mask_len,
-                                 boot_cpu_data.x86_cache_bits - 1);
-               low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
-       } else
-               WARN_ON_ONCE(boot_cpu_has_bug(X86_BUG_L1TF));
+                       rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
+       }
  
        shadow_nonpresent_or_rsvd_lower_gfn_mask =
                GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
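
The rework above applies the L1TF reserved-bit trick only when the CPU is affected and actually has cache bits to spare below bit 52. A standalone sketch of the resulting masks for an illustrative CPU with 46 cache bits and the 5-bit reserved region (GENMASK_ULL and rsvd_bits reimplemented locally):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define RSVD_MASK_LEN	5	/* shadow_nonpresent_or_rsvd_mask_len */

/* Bits l..h inclusive, as GENMASK_ULL() in include/linux/bits.h. */
static uint64_t genmask_ull(unsigned int h, unsigned int l)
{
	return (~0ULL << l) & (~0ULL >> (63 - h));
}

int main(void)
{
	unsigned int cache_bits = 46;	/* illustrative x86_cache_bits */
	unsigned int low_phys_bits = cache_bits - RSVD_MASK_LEN;

	/* rsvd_bits(low_phys_bits, cache_bits - 1) */
	uint64_t rsvd_mask = genmask_ull(cache_bits - 1, low_phys_bits);
	/* GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT) */
	uint64_t lower_gfn_mask = genmask_ull(low_phys_bits - 1, PAGE_SHIFT);

	printf("nonpresent_or_rsvd = %#018llx\n", (unsigned long long)rsvd_mask);
	printf("lower_gfn          = %#018llx\n", (unsigned long long)lower_gfn_mask);
	return 0;
}
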
@@@ -620,7 -621,7 +621,7 @@@ static int is_large_pte(u64 pte
  
  static int is_last_spte(u64 pte, int level)
  {
-       if (level == PT_PAGE_TABLE_LEVEL)
+       if (level == PG_LEVEL_4K)
                return 1;
        if (is_large_pte(pte))
                return 1;
@@@ -1196,7 -1197,7 +1197,7 @@@ static void update_gfn_disallow_lpage_c
        struct kvm_lpage_info *linfo;
        int i;
  
-       for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
+       for (i = PG_LEVEL_2M; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
                linfo = lpage_info_slot(gfn, slot, i);
                linfo->disallow_lpage += count;
                WARN_ON(linfo->disallow_lpage < 0);
@@@ -1225,7 -1226,7 +1226,7 @@@ static void account_shadowed(struct kv
        slot = __gfn_to_memslot(slots, gfn);
  
        /* the non-leaf shadow pages are keeping readonly. */
-       if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+       if (sp->role.level > PG_LEVEL_4K)
                return kvm_slot_page_track_add_page(kvm, slot, gfn,
                                                    KVM_PAGE_TRACK_WRITE);
  
@@@ -1253,7 -1254,7 +1254,7 @@@ static void unaccount_shadowed(struct k
        gfn = sp->gfn;
        slots = kvm_memslots_for_spte_role(kvm, sp->role);
        slot = __gfn_to_memslot(slots, gfn);
-       if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+       if (sp->role.level > PG_LEVEL_4K)
                return kvm_slot_page_track_remove_page(kvm, slot, gfn,
                                                       KVM_PAGE_TRACK_WRITE);
  
@@@ -1398,7 -1399,7 +1399,7 @@@ static struct kvm_rmap_head *__gfn_to_r
        unsigned long idx;
  
        idx = gfn_to_index(gfn, slot->base_gfn, level);
-       return &slot->arch.rmap[level - PT_PAGE_TABLE_LEVEL][idx];
+       return &slot->arch.rmap[level - PG_LEVEL_4K][idx];
  }
  
  static struct kvm_rmap_head *gfn_to_rmap(struct kvm *kvm, gfn_t gfn,
@@@ -1529,8 -1530,7 +1530,7 @@@ static void drop_spte(struct kvm *kvm, 
  static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
  {
        if (is_large_pte(*sptep)) {
-               WARN_ON(page_header(__pa(sptep))->role.level ==
-                       PT_PAGE_TABLE_LEVEL);
+               WARN_ON(page_header(__pa(sptep))->role.level == PG_LEVEL_4K);
                drop_spte(kvm, sptep);
                --kvm->stat.lpages;
                return true;
@@@ -1682,7 -1682,7 +1682,7 @@@ static void kvm_mmu_write_protect_pt_ma
  
        while (mask) {
                rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-                                         PT_PAGE_TABLE_LEVEL, slot);
+                                         PG_LEVEL_4K, slot);
                __rmap_write_protect(kvm, rmap_head, false);
  
                /* clear the first set bit */
@@@ -1708,7 -1708,7 +1708,7 @@@ void kvm_mmu_clear_dirty_pt_masked(stru
  
        while (mask) {
                rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-                                         PT_PAGE_TABLE_LEVEL, slot);
+                                         PG_LEVEL_4K, slot);
                __rmap_clear_dirty(kvm, rmap_head);
  
                /* clear the first set bit */
@@@ -1760,7 -1760,7 +1760,7 @@@ bool kvm_mmu_slot_gfn_write_protect(str
        int i;
        bool write_protected = false;
  
-       for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
+       for (i = PG_LEVEL_4K; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
                rmap_head = __gfn_to_rmap(gfn, i, slot);
                write_protected |= __rmap_write_protect(kvm, rmap_head, true);
        }
@@@ -1948,8 -1948,8 +1948,8 @@@ static int kvm_handle_hva_range(struct 
                        gfn_start = hva_to_gfn_memslot(hva_start, memslot);
                        gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
  
-                       for_each_slot_rmap_range(memslot, PT_PAGE_TABLE_LEVEL,
-                                                PT_MAX_HUGEPAGE_LEVEL,
+                       for_each_slot_rmap_range(memslot, PG_LEVEL_4K,
+                                                KVM_MAX_HUGEPAGE_LEVEL,
                                                 gfn_start, gfn_end - 1,
                                                 &iterator)
                                ret |= handler(kvm, iterator.rmap, memslot,
@@@ -2153,10 -2153,6 +2153,6 @@@ static int nonpaging_sync_page(struct k
        return 0;
  }
  
- static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root)
- {
- }
  static void nonpaging_update_pte(struct kvm_vcpu *vcpu,
                                 struct kvm_mmu_page *sp, u64 *spte,
                                 const void *pte)
@@@ -2313,7 -2309,7 +2309,7 @@@ static void kvm_mmu_flush_or_zap(struc
                return;
  
        if (local_flush)
-               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
  }
  
  #ifdef CONFIG_KVM_MMU_AUDIT
@@@ -2347,7 -2343,7 +2343,7 @@@ static bool kvm_sync_pages(struct kvm_v
                if (!s->unsync)
                        continue;
  
-               WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
+               WARN_ON(s->role.level != PG_LEVEL_4K);
                ret |= kvm_sync_page(vcpu, s, invalid_list);
        }
  
@@@ -2376,7 -2372,7 +2372,7 @@@ static int mmu_pages_next(struct kvm_mm
                int level = sp->role.level;
  
                parents->idx[level-1] = idx;
-               if (level == PT_PAGE_TABLE_LEVEL)
+               if (level == PG_LEVEL_4K)
                        break;
  
                parents->parent[level-2] = sp;
@@@ -2398,7 -2394,7 +2394,7 @@@ static int mmu_pages_first(struct kvm_m
  
        sp = pvec->page[0].sp;
        level = sp->role.level;
-       WARN_ON(level == PT_PAGE_TABLE_LEVEL);
+       WARN_ON(level == PG_LEVEL_4K);
  
        parents->parent[level-2] = sp;
  
@@@ -2520,11 -2516,11 +2516,11 @@@ static struct kvm_mmu_page *kvm_mmu_get
                                break;
  
                        WARN_ON(!list_empty(&invalid_list));
-                       kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+                       kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
                }
  
                if (sp->unsync_children)
-                       kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+                       kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
  
                __clear_sp_write_flooding_count(sp);
                trace_kvm_mmu_get_page(sp, false);
                 * be inconsistent with guest page table.
                 */
                account_shadowed(vcpu->kvm, sp);
-               if (level == PT_PAGE_TABLE_LEVEL &&
-                     rmap_write_protect(vcpu, gfn))
+               if (level == PG_LEVEL_4K && rmap_write_protect(vcpu, gfn))
                        kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
  
-               if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+               if (level > PG_LEVEL_4K && need_sync)
                        flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
        }
        clear_page(sp->spt);
@@@ -2601,7 -2596,7 +2596,7 @@@ static void shadow_walk_init(struct kvm
  
  static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
  {
-       if (iterator->level < PT_PAGE_TABLE_LEVEL)
+       if (iterator->level < PG_LEVEL_4K)
                return false;
  
        iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
@@@ -2722,7 -2717,7 +2717,7 @@@ static int mmu_zap_unsync_children(stru
        struct mmu_page_path parents;
        struct kvm_mmu_pages pages;
  
-       if (parent->role.level == PT_PAGE_TABLE_LEVEL)
+       if (parent->role.level == PG_LEVEL_4K)
                return 0;
  
        while (mmu_unsync_walk(parent, &pages)) {
@@@ -2921,7 -2916,7 +2916,7 @@@ static bool mmu_need_write_protect(stru
                if (sp->unsync)
                        continue;
  
-               WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
+               WARN_ON(sp->role.level != PG_LEVEL_4K);
                kvm_unsync_page(vcpu, sp);
        }
  
@@@ -3020,7 -3015,7 +3015,7 @@@ static int set_spte(struct kvm_vcpu *vc
        if (!speculative)
                spte |= spte_shadow_accessed_mask(spte);
  
-       if (level > PT_PAGE_TABLE_LEVEL && (pte_access & ACC_EXEC_MASK) &&
+       if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
            is_nx_huge_page_enabled()) {
                pte_access &= ~ACC_EXEC_MASK;
        }
        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;
  
-       if (level > PT_PAGE_TABLE_LEVEL)
+       if (level > PG_LEVEL_4K)
                spte |= PT_PAGE_SIZE_MASK;
        if (tdp_enabled)
                spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,
@@@ -3103,8 -3098,7 +3098,7 @@@ static int mmu_set_spte(struct kvm_vcp
                 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
                 * the parent of the now unreachable PTE.
                 */
-               if (level > PT_PAGE_TABLE_LEVEL &&
-                   !is_large_pte(*sptep)) {
+               if (level > PG_LEVEL_4K && !is_large_pte(*sptep)) {
                        struct kvm_mmu_page *child;
                        u64 pte = *sptep;
  
        if (set_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
                if (write_fault)
                        ret = RET_PF_EMULATE;
-               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
        }
  
        if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
@@@ -3228,7 -3222,7 +3222,7 @@@ static void direct_pte_prefetch(struct 
        if (sp_ad_disabled(sp))
                return;
  
-       if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+       if (sp->role.level > PG_LEVEL_4K)
                return;
  
        __direct_pte_prefetch(vcpu, sp, sptep);
@@@ -3241,12 -3235,8 +3235,8 @@@ static int host_pfn_mapping_level(struc
        pte_t *pte;
        int level;
  
-       BUILD_BUG_ON(PT_PAGE_TABLE_LEVEL != (int)PG_LEVEL_4K ||
-                    PT_DIRECTORY_LEVEL != (int)PG_LEVEL_2M ||
-                    PT_PDPE_LEVEL != (int)PG_LEVEL_1G);
        if (!PageCompound(pfn_to_page(pfn)) && !kvm_is_zone_device_pfn(pfn))
-               return PT_PAGE_TABLE_LEVEL;
+               return PG_LEVEL_4K;
  
        /*
         * Note, using the already-retrieved memslot and __gfn_to_hva_memslot()
  
        pte = lookup_address_in_mm(vcpu->kvm->mm, hva, &level);
        if (unlikely(!pte))
-               return PT_PAGE_TABLE_LEVEL;
+               return PG_LEVEL_4K;
  
        return level;
  }
@@@ -3274,28 -3264,28 +3264,28 @@@ static int kvm_mmu_hugepage_adjust(stru
        kvm_pfn_t mask;
        int level;
  
-       if (unlikely(max_level == PT_PAGE_TABLE_LEVEL))
-               return PT_PAGE_TABLE_LEVEL;
+       if (unlikely(max_level == PG_LEVEL_4K))
+               return PG_LEVEL_4K;
  
        if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn))
-               return PT_PAGE_TABLE_LEVEL;
+               return PG_LEVEL_4K;
  
        slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn, true);
        if (!slot)
-               return PT_PAGE_TABLE_LEVEL;
+               return PG_LEVEL_4K;
  
        max_level = min(max_level, max_page_level);
-       for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
+       for ( ; max_level > PG_LEVEL_4K; max_level--) {
                linfo = lpage_info_slot(gfn, slot, max_level);
                if (!linfo->disallow_lpage)
                        break;
        }
  
-       if (max_level == PT_PAGE_TABLE_LEVEL)
-               return PT_PAGE_TABLE_LEVEL;
+       if (max_level == PG_LEVEL_4K)
+               return PG_LEVEL_4K;
  
        level = host_pfn_mapping_level(vcpu, gfn, pfn, slot);
-       if (level == PT_PAGE_TABLE_LEVEL)
+       if (level == PG_LEVEL_4K)
                return level;
  
        level = min(level, max_level);
@@@ -3317,7 -3307,7 +3307,7 @@@ static void disallowed_hugepage_adjust(
        int level = *levelp;
        u64 spte = *it.sptep;
  
-       if (it.level == level && level > PT_PAGE_TABLE_LEVEL &&
+       if (it.level == level && level > PG_LEVEL_4K &&
            is_nx_huge_page_enabled() &&
            is_shadow_present_pte(spte) &&
            !is_large_pte(spte)) {
@@@ -3574,7 -3564,7 +3564,7 @@@ static bool fast_page_fault(struct kvm_
                         *
                         * See the comments in kvm_arch_commit_memory_region().
                         */
-                       if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+                       if (sp->role.level > PG_LEVEL_4K)
                                break;
                }
  
                /*
                 * Currently, fast page fault only works for direct mapping
                 * since the gfn is not stable for indirect shadow page. See
 -               * Documentation/virt/kvm/locking.txt to get more detail.
 +               * Documentation/virt/kvm/locking.rst to get more detail.
                 */
                fault_handled = fast_pf_fix_direct_spte(vcpu, sp,
                                                        iterator.sptep, spte,
@@@ -3666,7 -3656,7 +3656,7 @@@ void kvm_mmu_free_roots(struct kvm_vcp
                                                           &invalid_list);
                        mmu->root_hpa = INVALID_PAGE;
                }
-               mmu->root_cr3 = 0;
+               mmu->root_pgd = 0;
        }
  
        kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@@ -3686,58 -3676,64 +3676,64 @@@ static int mmu_check_root(struct kvm_vc
        return ret;
  }
  
- static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
+ static hpa_t mmu_alloc_root(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
+                           u8 level, bool direct)
  {
        struct kvm_mmu_page *sp;
+       spin_lock(&vcpu->kvm->mmu_lock);
+       if (make_mmu_pages_available(vcpu)) {
+               spin_unlock(&vcpu->kvm->mmu_lock);
+               return INVALID_PAGE;
+       }
+       sp = kvm_mmu_get_page(vcpu, gfn, gva, level, direct, ACC_ALL);
+       ++sp->root_count;
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       return __pa(sp->spt);
+ }
+
+ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
+ {
+       u8 shadow_root_level = vcpu->arch.mmu->shadow_root_level;
+       hpa_t root;
        unsigned i;
  
-       if (vcpu->arch.mmu->shadow_root_level >= PT64_ROOT_4LEVEL) {
-               spin_lock(&vcpu->kvm->mmu_lock);
-               if(make_mmu_pages_available(vcpu) < 0) {
-                       spin_unlock(&vcpu->kvm->mmu_lock);
+       if (shadow_root_level >= PT64_ROOT_4LEVEL) {
+               root = mmu_alloc_root(vcpu, 0, 0, shadow_root_level, true);
+               if (!VALID_PAGE(root))
                        return -ENOSPC;
-               }
-               sp = kvm_mmu_get_page(vcpu, 0, 0,
-                               vcpu->arch.mmu->shadow_root_level, 1, ACC_ALL);
-               ++sp->root_count;
-               spin_unlock(&vcpu->kvm->mmu_lock);
-               vcpu->arch.mmu->root_hpa = __pa(sp->spt);
-       } else if (vcpu->arch.mmu->shadow_root_level == PT32E_ROOT_LEVEL) {
+               vcpu->arch.mmu->root_hpa = root;
+       } else if (shadow_root_level == PT32E_ROOT_LEVEL) {
                for (i = 0; i < 4; ++i) {
-                       hpa_t root = vcpu->arch.mmu->pae_root[i];
+                       MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
  
-                       MMU_WARN_ON(VALID_PAGE(root));
-                       spin_lock(&vcpu->kvm->mmu_lock);
-                       if (make_mmu_pages_available(vcpu) < 0) {
-                               spin_unlock(&vcpu->kvm->mmu_lock);
+                       root = mmu_alloc_root(vcpu, i << (30 - PAGE_SHIFT),
+                                             i << 30, PT32_ROOT_LEVEL, true);
+                       if (!VALID_PAGE(root))
                                return -ENOSPC;
-                       }
-                       sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
-                                       i << 30, PT32_ROOT_LEVEL, 1, ACC_ALL);
-                       root = __pa(sp->spt);
-                       ++sp->root_count;
-                       spin_unlock(&vcpu->kvm->mmu_lock);
                        vcpu->arch.mmu->pae_root[i] = root | PT_PRESENT_MASK;
                }
                vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
        } else
                BUG();
  
-       /* root_cr3 is ignored for direct MMUs. */
-       vcpu->arch.mmu->root_cr3 = 0;
+       /* root_pgd is ignored for direct MMUs. */
+       vcpu->arch.mmu->root_pgd = 0;
  
        return 0;
  }
  
  static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu)
  {
-       struct kvm_mmu_page *sp;
        u64 pdptr, pm_mask;
-       gfn_t root_gfn, root_cr3;
+       gfn_t root_gfn, root_pgd;
+       hpa_t root;
        int i;
  
-       root_cr3 = vcpu->arch.mmu->get_guest_pgd(vcpu);
-       root_gfn = root_cr3 >> PAGE_SHIFT;
+       root_pgd = vcpu->arch.mmu->get_guest_pgd(vcpu);
+       root_gfn = root_pgd >> PAGE_SHIFT;
  
        if (mmu_check_root(vcpu, root_gfn))
                return 1;
         * write-protect the guests page table root.
         */
        if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
-               hpa_t root = vcpu->arch.mmu->root_hpa;
-               MMU_WARN_ON(VALID_PAGE(root));
+               MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->root_hpa));
  
-               spin_lock(&vcpu->kvm->mmu_lock);
-               if (make_mmu_pages_available(vcpu) < 0) {
-                       spin_unlock(&vcpu->kvm->mmu_lock);
+               root = mmu_alloc_root(vcpu, root_gfn, 0,
+                                     vcpu->arch.mmu->shadow_root_level, false);
+               if (!VALID_PAGE(root))
                        return -ENOSPC;
-               }
-               sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                               vcpu->arch.mmu->shadow_root_level, 0, ACC_ALL);
-               root = __pa(sp->spt);
-               ++sp->root_count;
-               spin_unlock(&vcpu->kvm->mmu_lock);
                vcpu->arch.mmu->root_hpa = root;
-               goto set_root_cr3;
+               goto set_root_pgd;
        }
  
        /*
                pm_mask |= PT_ACCESSED_MASK | PT_WRITABLE_MASK | PT_USER_MASK;
  
        for (i = 0; i < 4; ++i) {
-               hpa_t root = vcpu->arch.mmu->pae_root[i];
-               MMU_WARN_ON(VALID_PAGE(root));
+               MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu->pae_root[i]));
                if (vcpu->arch.mmu->root_level == PT32E_ROOT_LEVEL) {
                        pdptr = vcpu->arch.mmu->get_pdptr(vcpu, i);
                        if (!(pdptr & PT_PRESENT_MASK)) {
                        if (mmu_check_root(vcpu, root_gfn))
                                return 1;
                }
-               spin_lock(&vcpu->kvm->mmu_lock);
-               if (make_mmu_pages_available(vcpu) < 0) {
-                       spin_unlock(&vcpu->kvm->mmu_lock);
-                       return -ENOSPC;
-               }
-               sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, PT32_ROOT_LEVEL,
-                                     0, ACC_ALL);
-               root = __pa(sp->spt);
-               ++sp->root_count;
-               spin_unlock(&vcpu->kvm->mmu_lock);
  
+               root = mmu_alloc_root(vcpu, root_gfn, i << 30,
+                                     PT32_ROOT_LEVEL, false);
+               if (!VALID_PAGE(root))
+                       return -ENOSPC;
                vcpu->arch.mmu->pae_root[i] = root | pm_mask;
        }
        vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root);
                vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root);
        }
  
- set_root_cr3:
-       vcpu->arch.mmu->root_cr3 = root_cr3;
+ set_root_pgd:
+       vcpu->arch.mmu->root_pgd = root_pgd;
  
        return 0;
  }
@@@ -4083,18 -4063,16 +4063,16 @@@ static bool try_async_pf(struct kvm_vcp
                         gpa_t cr2_or_gpa, kvm_pfn_t *pfn, bool write,
                         bool *writable)
  {
-       struct kvm_memory_slot *slot;
+       struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        bool async;
  
-       /*
-        * Don't expose private memslots to L2.
-        */
-       if (is_guest_mode(vcpu) && !kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+       /* Don't expose private memslots to L2. */
+       if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
                *pfn = KVM_PFN_NOSLOT;
+               *writable = false;
                return false;
        }
  
-       slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        async = false;
        *pfn = __gfn_to_pfn_memslot(slot, gfn, false, &async, write, writable);
        if (!async)
@@@ -4135,7 -4113,7 +4113,7 @@@ static int direct_page_fault(struct kvm
                return r;
  
        if (lpage_disallowed)
-               max_level = PT_PAGE_TABLE_LEVEL;
+               max_level = PG_LEVEL_4K;
  
        if (fast_page_fault(vcpu, gpa, error_code))
                return RET_PF_RETRY;
@@@ -4171,7 -4149,7 +4149,7 @@@ static int nonpaging_page_fault(struct 
  
        /* This path builds a PAE pagetable, we can map 2mb pages at maximum. */
        return direct_page_fault(vcpu, gpa & PAGE_MASK, error_code, prefault,
-                                PT_DIRECTORY_LEVEL, false);
+                                PG_LEVEL_2M, false);
  }
  
  int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
  #endif
  
        vcpu->arch.l1tf_flush_l1d = true;
-       switch (vcpu->arch.apf.host_apf_reason) {
+       switch (vcpu->arch.apf.host_apf_flags) {
        default:
                trace_kvm_page_fault(fault_address, error_code);
  
                                insn_len);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
-               vcpu->arch.apf.host_apf_reason = 0;
+               vcpu->arch.apf.host_apf_flags = 0;
                local_irq_disable();
-               kvm_async_pf_task_wait(fault_address, 0);
+               kvm_async_pf_task_wait_schedule(fault_address);
                local_irq_enable();
                break;
        case KVM_PV_REASON_PAGE_READY:
-               vcpu->arch.apf.host_apf_reason = 0;
+               vcpu->arch.apf.host_apf_flags = 0;
                local_irq_disable();
                kvm_async_pf_task_wake(fault_address);
                local_irq_enable();
@@@ -4217,8 -4195,8 +4195,8 @@@ int kvm_tdp_page_fault(struct kvm_vcpu 
  {
        int max_level;
  
-       for (max_level = PT_MAX_HUGEPAGE_LEVEL;
-            max_level > PT_PAGE_TABLE_LEVEL;
+       for (max_level = KVM_MAX_HUGEPAGE_LEVEL;
+            max_level > PG_LEVEL_4K;
             max_level--) {
                int page_num = KVM_PAGES_PER_HPAGE(max_level);
                gfn_t base = (gpa >> PAGE_SHIFT) & ~(page_num - 1);
@@@ -4237,7 -4215,7 +4215,7 @@@ static void nonpaging_init_context(stru
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->sync_page = nonpaging_sync_page;
-       context->invlpg = nonpaging_invlpg;
+       context->invlpg = NULL;
        context->update_pte = nonpaging_update_pte;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->nx = false;
  }
  
- static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t cr3,
+ static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
                                  union kvm_mmu_page_role role)
  {
-       return (role.direct || cr3 == root->cr3) &&
+       return (role.direct || pgd == root->pgd) &&
               VALID_PAGE(root->hpa) && page_header(root->hpa) &&
               role.word == page_header(root->hpa)->role.word;
  }
  
  /*
-  * Find out if a previously cached root matching the new CR3/role is available.
+  * Find out if a previously cached root matching the new pgd/role is available.
   * The current root is also inserted into the cache.
   * If a matching root was found, it is assigned to kvm_mmu->root_hpa and true is
   * returned.
   * Otherwise, the LRU root from the cache is assigned to kvm_mmu->root_hpa and
   * false is returned. This root should now be freed by the caller.
   */
- static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_pgd,
                                  union kvm_mmu_page_role new_role)
  {
        uint i;
        struct kvm_mmu_root_info root;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
  
-       root.cr3 = mmu->root_cr3;
+       root.pgd = mmu->root_pgd;
        root.hpa = mmu->root_hpa;
  
-       if (is_root_usable(&root, new_cr3, new_role))
+       if (is_root_usable(&root, new_pgd, new_role))
                return true;
  
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
                swap(root, mmu->prev_roots[i]);
  
-               if (is_root_usable(&root, new_cr3, new_role))
+               if (is_root_usable(&root, new_pgd, new_role))
                        break;
        }
  
        mmu->root_hpa = root.hpa;
-       mmu->root_cr3 = root.cr3;
+       mmu->root_pgd = root.pgd;
  
        return i < KVM_MMU_NUM_PREV_ROOTS;
  }
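
cached_root_available() above searches by swapping: the root being replaced is pushed through prev_roots[] as the walk advances, so a hit leaves the array in most-recently-used order and a miss leaves the least-recently-used entry in `root` for the caller to free. A simplified userspace sketch of that pattern (bare pgd/hpa pairs instead of the real structures):

#include <stdbool.h>
#include <stdint.h>

#define NUM_PREV_ROOTS 3

struct demo_root { uint64_t pgd; uint64_t hpa; };

/* Returns true if a cached root for new_pgd was found; either way the
 * previously-current root has been pushed into the prev[] cache. */
static bool demo_cached_root(struct demo_root *current_root,
			     struct demo_root prev[NUM_PREV_ROOTS],
			     uint64_t new_pgd)
{
	struct demo_root root = *current_root;
	int i = -1;

	if (root.pgd != new_pgd) {
		for (i = 0; i < NUM_PREV_ROOTS; i++) {
			struct demo_root tmp = root;

			/* swap: the displaced root drops into slot i */
			root = prev[i];
			prev[i] = tmp;
			if (root.pgd == new_pgd)
				break;
		}
	}

	*current_root = root;	/* either the hit, or the LRU entry to free */
	return i < NUM_PREV_ROOTS;
}
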
  
- static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
-                           union kvm_mmu_page_role new_role,
-                           bool skip_tlb_flush)
+ static bool fast_pgd_switch(struct kvm_vcpu *vcpu, gpa_t new_pgd,
+                           union kvm_mmu_page_role new_role)
  {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
  
         * later if necessary.
         */
        if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL &&
-           mmu->root_level >= PT64_ROOT_4LEVEL) {
-               if (mmu_check_root(vcpu, new_cr3 >> PAGE_SHIFT))
-                       return false;
-               if (cached_root_available(vcpu, new_cr3, new_role)) {
-                       /*
-                        * It is possible that the cached previous root page is
-                        * obsolete because of a change in the MMU generation
-                        * number. However, changing the generation number is
-                        * accompanied by KVM_REQ_MMU_RELOAD, which will free
-                        * the root set here and allocate a new one.
-                        */
-                       kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
-                       if (!skip_tlb_flush) {
-                               kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
-                               kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-                       }
-                       /*
-                        * The last MMIO access's GVA and GPA are cached in the
-                        * VCPU. When switching to a new CR3, that GVA->GPA
-                        * mapping may no longer be valid. So clear any cached
-                        * MMIO info even when we don't need to sync the shadow
-                        * page tables.
-                        */
-                       vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
-                       __clear_sp_write_flooding_count(
-                               page_header(mmu->root_hpa));
-                       return true;
-               }
-       }
+           mmu->root_level >= PT64_ROOT_4LEVEL)
+               return !mmu_check_root(vcpu, new_pgd >> PAGE_SHIFT) &&
+                      cached_root_available(vcpu, new_pgd, new_role);
  
        return false;
  }
  
- static void __kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3,
+ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
                              union kvm_mmu_page_role new_role,
-                             bool skip_tlb_flush)
+                             bool skip_tlb_flush, bool skip_mmu_sync)
  {
-       if (!fast_cr3_switch(vcpu, new_cr3, new_role, skip_tlb_flush))
-               kvm_mmu_free_roots(vcpu, vcpu->arch.mmu,
-                                  KVM_MMU_ROOT_CURRENT);
+       if (!fast_pgd_switch(vcpu, new_pgd, new_role)) {
+               kvm_mmu_free_roots(vcpu, vcpu->arch.mmu, KVM_MMU_ROOT_CURRENT);
+               return;
+       }
+       /*
+        * It's possible that the cached previous root page is obsolete because
+        * of a change in the MMU generation number. However, changing the
+        * generation number is accompanied by KVM_REQ_MMU_RELOAD, which will
+        * free the root set here and allocate a new one.
+        */
+       kvm_make_request(KVM_REQ_LOAD_MMU_PGD, vcpu);
+       if (!skip_mmu_sync || force_flush_and_sync_on_reuse)
+               kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
+       if (!skip_tlb_flush || force_flush_and_sync_on_reuse)
+               kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
+       /*
+        * The last MMIO access's GVA and GPA are cached in the VCPU. When
+        * switching to a new CR3, that GVA->GPA mapping may no longer be
+        * valid. So clear any cached MMIO info even when we don't need to sync
+        * the shadow page tables.
+        */
+       vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
+       __clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa));
  }
  
- void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush)
+ void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
+                    bool skip_mmu_sync)
  {
-       __kvm_mmu_new_cr3(vcpu, new_cr3, kvm_mmu_calc_root_page_role(vcpu),
-                         skip_tlb_flush);
+       __kvm_mmu_new_pgd(vcpu, new_pgd, kvm_mmu_calc_root_page_role(vcpu),
+                         skip_tlb_flush, skip_mmu_sync);
  }
- EXPORT_SYMBOL_GPL(kvm_mmu_new_cr3);
+ EXPORT_SYMBOL_GPL(kvm_mmu_new_pgd);
  
  static unsigned long get_cr3(struct kvm_vcpu *vcpu)
  {
        return kvm_read_cr3(vcpu);
  }
  
- static void inject_page_fault(struct kvm_vcpu *vcpu,
-                             struct x86_exception *fault)
- {
-       vcpu->arch.mmu->inject_page_fault(vcpu, fault);
- }
  static bool sync_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,
                           unsigned int access, int *nr_present)
  {
@@@ -4391,11 -4357,11 +4357,11 @@@ static inline bool is_last_gpte(struct 
        gpte &= level - mmu->last_nonleaf_level;
  
        /*
-        * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
-        * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
-        * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+        * PG_LEVEL_4K always terminates.  The RHS has bit 7 set
+        * iff level <= PG_LEVEL_4K, which for our purpose means
+        * level == PG_LEVEL_4K; set PT_PAGE_SIZE_MASK in gpte then.
         */
-       gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+       gpte |= level - PG_LEVEL_4K - 1;
  
        return gpte & PT_PAGE_SIZE_MASK;
  }
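
Both subtractions above rely on unsigned wrap-around: `level - PG_LEVEL_4K - 1` keeps bit 7 (PT_PAGE_SIZE_MASK) set only when level == PG_LEVEL_4K, while `level - last_nonleaf_level` keeps it only when the level sits below the last non-leaf level, i.e. where a large page is architecturally possible. A standalone check of the trick with simplified constants:

#include <stdio.h>

#define PT_PAGE_SIZE_MASK	(1u << 7)
#define PG_LEVEL_4K		1

/* Simplified copy of the predicate, keeping only the PS-style bit. */
static unsigned int demo_is_last_gpte(unsigned int last_nonleaf_level,
				      unsigned int level, unsigned int gpte)
{
	gpte &= level - last_nonleaf_level;	/* wraps => bit 7 kept when level < last_nonleaf_level */
	gpte |= level - PG_LEVEL_4K - 1;	/* wraps => bit 7 set when level == PG_LEVEL_4K */
	return gpte & PT_PAGE_SIZE_MASK;
}

int main(void)
{
	/* e.g. 4-level paging, where only the top level can never be a leaf */
	printf("4K PTE:        %d\n", !!demo_is_last_gpte(4, 1, 0));
	printf("2M PDE, PS=1:  %d\n", !!demo_is_last_gpte(4, 2, PT_PAGE_SIZE_MASK));
	printf("PML4E, bit 7:  %d\n", !!demo_is_last_gpte(4, 4, PT_PAGE_SIZE_MASK));
	return 0;	/* prints 1, 1, 0 */
}
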
@@@ -4909,7 -4875,7 +4875,7 @@@ kvm_calc_tdp_mmu_root_page_role(struct 
        union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, base_only);
  
        role.base.ad_disabled = (shadow_accessed_mask == 0);
-       role.base.level = kvm_x86_ops.get_tdp_level(vcpu);
+       role.base.level = vcpu->arch.tdp_level;
        role.base.direct = true;
        role.base.gpte_is_8_bytes = true;
  
@@@ -4928,9 -4894,9 +4894,9 @@@ static void init_kvm_tdp_mmu(struct kvm
        context->mmu_role.as_u64 = new_role.as_u64;
        context->page_fault = kvm_tdp_page_fault;
        context->sync_page = nonpaging_sync_page;
-       context->invlpg = nonpaging_invlpg;
+       context->invlpg = NULL;
        context->update_pte = nonpaging_update_pte;
-       context->shadow_root_level = kvm_x86_ops.get_tdp_level(vcpu);
+       context->shadow_root_level = vcpu->arch.tdp_level;
        context->direct_map = true;
        context->get_guest_pgd = get_cr3;
        context->get_pdptr = kvm_pdptr_read;
@@@ -4986,7 -4952,7 +4952,7 @@@ kvm_calc_shadow_mmu_root_page_role(stru
        return role;
  }
  
- void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
+ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer)
  {
        struct kvm_mmu *context = vcpu->arch.mmu;
        union kvm_mmu_role new_role =
        if (new_role.as_u64 == context->mmu_role.as_u64)
                return;
  
-       if (!is_paging(vcpu))
+       if (!(cr0 & X86_CR0_PG))
                nonpaging_init_context(vcpu, context);
-       else if (is_long_mode(vcpu))
+       else if (efer & EFER_LMA)
                paging64_init_context(vcpu, context);
-       else if (is_pae(vcpu))
+       else if (cr4 & X86_CR4_PAE)
                paging32E_init_context(vcpu, context);
        else
                paging32_init_context(vcpu, context);
@@@ -5047,7 -5013,7 +5013,7 @@@ void kvm_init_shadow_ept_mmu(struct kvm
                kvm_calc_shadow_ept_root_page_role(vcpu, accessed_dirty,
                                                   execonly, level);
  
-       __kvm_mmu_new_cr3(vcpu, new_eptp, new_role.base, false);
+       __kvm_mmu_new_pgd(vcpu, new_eptp, new_role.base, true, true);
  
        if (new_role.as_u64 == context->mmu_role.as_u64)
                return;
@@@ -5077,7 -5043,11 +5043,11 @@@ static void init_kvm_softmmu(struct kvm
  {
        struct kvm_mmu *context = vcpu->arch.mmu;
  
-       kvm_init_shadow_mmu(vcpu);
+       kvm_init_shadow_mmu(vcpu,
+                           kvm_read_cr0_bits(vcpu, X86_CR0_PG),
+                           kvm_read_cr4_bits(vcpu, X86_CR4_PAE),
+                           vcpu->arch.efer);
        context->get_guest_pgd     = get_cr3;
        context->get_pdptr         = kvm_pdptr_read;
        context->inject_page_fault = kvm_inject_page_fault;
@@@ -5096,6 -5066,12 +5066,12 @@@ static void init_kvm_nested_mmu(struct 
        g_context->get_pdptr         = kvm_pdptr_read;
        g_context->inject_page_fault = kvm_inject_page_fault;
  
+       /*
+        * L2 page tables are never shadowed, so there is no need to sync
+        * SPTEs.
+        */
+       g_context->invlpg            = NULL;
        /*
         * Note that arch.mmu->gva_to_gpa translates l2_gpa to l1_gpa using
         * L1's nested page tables (e.g. EPT12). The nested translation
@@@ -5183,7 -5159,7 +5159,7 @@@ int kvm_mmu_load(struct kvm_vcpu *vcpu
        if (r)
                goto out;
        kvm_mmu_load_pgd(vcpu);
-       kvm_x86_ops.tlb_flush(vcpu, true);
+       kvm_x86_ops.tlb_flush_current(vcpu);
  out:
        return r;
  }
@@@ -5202,7 -5178,7 +5178,7 @@@ static void mmu_pte_write_new_pte(struc
                                  struct kvm_mmu_page *sp, u64 *spte,
                                  const void *new)
  {
-       if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+       if (sp->role.level != PG_LEVEL_4K) {
                ++vcpu->kvm->stat.mmu_pde_zapped;
                return;
          }
@@@ -5260,7 -5236,7 +5236,7 @@@ static bool detect_write_flooding(struc
         * Skip write-flooding detected for the sp whose level is 1, because
         * it can become unsync, then the guest page is not write-protected.
         */
-       if (sp->role.level == PT_PAGE_TABLE_LEVEL)
+       if (sp->role.level == PG_LEVEL_4K)
                return false;
  
        atomic_inc(&sp->write_flooding_count);
@@@ -5497,37 -5473,54 +5473,54 @@@ emulate
  }
  EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
  
- void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+ void kvm_mmu_invalidate_gva(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
+                           gva_t gva, hpa_t root_hpa)
  {
-       struct kvm_mmu *mmu = vcpu->arch.mmu;
        int i;
  
-       /* INVLPG on a * non-canonical address is a NOP according to the SDM.  */
-       if (is_noncanonical_address(gva, vcpu))
+       /* It's actually a GPA for vcpu->arch.guest_mmu.  */
+       if (mmu != &vcpu->arch.guest_mmu) {
+               /* INVLPG on a non-canonical address is a NOP according to the SDM.  */
+               if (is_noncanonical_address(gva, vcpu))
+                       return;
+               kvm_x86_ops.tlb_flush_gva(vcpu, gva);
+       }
+       if (!mmu->invlpg)
                return;
  
-       mmu->invlpg(vcpu, gva, mmu->root_hpa);
+       if (root_hpa == INVALID_PAGE) {
+               mmu->invlpg(vcpu, gva, mmu->root_hpa);
  
-       /*
-        * INVLPG is required to invalidate any global mappings for the VA,
-        * irrespective of PCID. Since it would take us roughly similar amount
-        * of work to determine whether any of the prev_root mappings of the VA
-        * is marked global, or to just sync it blindly, so we might as well
-        * just always sync it.
-        *
-        * Mappings not reachable via the current cr3 or the prev_roots will be
-        * synced when switching to that cr3, so nothing needs to be done here
-        * for them.
-        */
-       for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
-               if (VALID_PAGE(mmu->prev_roots[i].hpa))
-                       mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+               /*
+                * INVLPG is required to invalidate any global mappings for the VA,
+                * irrespective of PCID. Since it would take us roughly similar amount
+                * of work to determine whether any of the prev_root mappings of the VA
+                * is marked global, or to just sync it blindly, so we might as well
+                * just always sync it.
+                *
+                * Mappings not reachable via the current cr3 or the prev_roots will be
+                * synced when switching to that cr3, so nothing needs to be done here
+                * for them.
+                */
+               for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
+                       if (VALID_PAGE(mmu->prev_roots[i].hpa))
+                               mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
+       } else {
+               mmu->invlpg(vcpu, gva, root_hpa);
+       }
+ }
+ EXPORT_SYMBOL_GPL(kvm_mmu_invalidate_gva);
  
-       kvm_x86_ops.tlb_flush_gva(vcpu, gva);
+ void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
+ {
+       kvm_mmu_invalidate_gva(vcpu, vcpu->arch.mmu, gva, INVALID_PAGE);
        ++vcpu->stat.invlpg;
  }
  EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
  
  void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid)
  {
        struct kvm_mmu *mmu = vcpu->arch.mmu;
  
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
                if (VALID_PAGE(mmu->prev_roots[i].hpa) &&
-                   pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) {
+                   pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].pgd)) {
                        mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa);
                        tlb_flush = true;
                }
@@@ -5574,9 -5567,9 +5567,9 @@@ void kvm_configure_mmu(bool enable_tdp
        if (tdp_enabled)
                max_page_level = tdp_page_level;
        else if (boot_cpu_has(X86_FEATURE_GBPAGES))
-               max_page_level = PT_PDPE_LEVEL;
+               max_page_level = PG_LEVEL_1G;
        else
-               max_page_level = PT_DIRECTORY_LEVEL;
+               max_page_level = PG_LEVEL_2M;
  }
  EXPORT_SYMBOL_GPL(kvm_configure_mmu);
  
@@@ -5632,24 -5625,24 +5625,24 @@@ static __always_inline boo
  slot_handle_all_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
                      slot_level_handler fn, bool lock_flush_tlb)
  {
-       return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
-                                PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+       return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
+                                KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
  }
  
  static __always_inline bool
  slot_handle_large_level(struct kvm *kvm, struct kvm_memory_slot *memslot,
                        slot_level_handler fn, bool lock_flush_tlb)
  {
-       return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL + 1,
-                                PT_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
+       return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K + 1,
+                                KVM_MAX_HUGEPAGE_LEVEL, lock_flush_tlb);
  }
  
  static __always_inline bool
  slot_handle_leaf(struct kvm *kvm, struct kvm_memory_slot *memslot,
                 slot_level_handler fn, bool lock_flush_tlb)
  {
-       return slot_handle_level(kvm, memslot, fn, PT_PAGE_TABLE_LEVEL,
-                                PT_PAGE_TABLE_LEVEL, lock_flush_tlb);
+       return slot_handle_level(kvm, memslot, fn, PG_LEVEL_4K,
+                                PG_LEVEL_4K, lock_flush_tlb);
  }
  
  static void free_mmu_pages(struct kvm_mmu *mmu)
@@@ -5672,7 -5665,7 +5665,7 @@@ static int alloc_mmu_pages(struct kvm_v
         * SVM's 32-bit NPT support, TDP paging doesn't use PAE paging and can
         * skip allocating the PDP table.
         */
-       if (tdp_enabled && kvm_x86_ops.get_tdp_level(vcpu) > PT32E_ROOT_LEVEL)
+       if (tdp_enabled && vcpu->arch.tdp_level > PT32E_ROOT_LEVEL)
                return 0;
  
        page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_DMA32);
@@@ -5695,13 -5688,13 +5688,13 @@@ int kvm_mmu_create(struct kvm_vcpu *vcp
        vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
  
        vcpu->arch.root_mmu.root_hpa = INVALID_PAGE;
-       vcpu->arch.root_mmu.root_cr3 = 0;
+       vcpu->arch.root_mmu.root_pgd = 0;
        vcpu->arch.root_mmu.translate_gpa = translate_gpa;
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
  
        vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE;
-       vcpu->arch.guest_mmu.root_cr3 = 0;
+       vcpu->arch.guest_mmu.root_pgd = 0;
        vcpu->arch.guest_mmu.translate_gpa = translate_gpa;
        for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
                vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID;
@@@ -5859,7 -5852,8 +5852,8 @@@ void kvm_zap_gfn_range(struct kvm *kvm
                                continue;
  
                        slot_handle_level_range(kvm, memslot, kvm_zap_rmapp,
-                                               PT_PAGE_TABLE_LEVEL, PT_MAX_HUGEPAGE_LEVEL,
+                                               PG_LEVEL_4K,
+                                               KVM_MAX_HUGEPAGE_LEVEL,
                                                start, end - 1, true);
                }
        }
@@@ -5881,7 -5875,7 +5875,7 @@@ void kvm_mmu_slot_remove_write_access(s
  
        spin_lock(&kvm->mmu_lock);
        flush = slot_handle_level(kvm, memslot, slot_rmap_write_protect,
-                               start_level, PT_MAX_HUGEPAGE_LEVEL, false);
+                               start_level, KVM_MAX_HUGEPAGE_LEVEL, false);
        spin_unlock(&kvm->mmu_lock);
  
        /*
@@@ -6142,27 -6136,18 +6136,18 @@@ static void kvm_set_mmio_spte_mask(void
        u64 mask;
  
        /*
-        * Set the reserved bits and the present bit of an paging-structure
-        * entry to generate page fault with PFER.RSV = 1.
+        * Set a reserved PA bit in MMIO SPTEs to generate page faults with
+        * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
+        * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
+        * 52-bit physical addresses then there are no reserved PA bits in the
+        * PTEs and so the reserved PA approach must be disabled.
         */
+       if (shadow_phys_bits < 52)
+               mask = BIT_ULL(51) | PT_PRESENT_MASK;
+       else
+               mask = 0;
  
-       /*
-        * Mask the uppermost physical address bit, which would be reserved as
-        * long as the supported physical address width is less than 52.
-        */
-       mask = 1ull << 51;
-       /* Set the present bit. */
-       mask |= 1ull;
-       /*
-        * If reserved bit is not supported, clear the present bit to disable
-        * mmio page fault.
-        */
-       if (shadow_phys_bits == 52)
-               mask &= ~1ull;
-       kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
+       kvm_mmu_set_mmio_spte_mask(mask, ACC_WRITE_MASK | ACC_USER_MASK);
  }
  
  static bool get_nx_auto_mode(void)
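
The rewritten kvm_set_mmio_spte_mask() spells the policy out: MMIO SPTEs set a reserved physical-address bit (bit 51) plus the present bit so that MMIO accesses fault with PFEC.RSVD=1, and the trick is disabled entirely once the CPU supports the full 52 physical address bits, since no PA bit is reserved then. A small standalone sketch of the selection:

#include <stdio.h>
#include <stdint.h>

#define BIT_ULL(n)		(1ULL << (n))
#define PT_PRESENT_MASK		BIT_ULL(0)

/* Mirrors the helper above: reserve bit 51 only while 64-bit PTEs still
 * have at least one reserved physical-address bit. */
static uint64_t demo_mmio_mask(unsigned int shadow_phys_bits)
{
	return shadow_phys_bits < 52 ? (BIT_ULL(51) | PT_PRESENT_MASK) : 0;
}

int main(void)
{
	printf("46-bit PA host: %#llx\n", (unsigned long long)demo_mmio_mask(46));
	printf("52-bit PA host: %#llx\n", (unsigned long long)demo_mmio_mask(52));
	return 0;
}
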
diff --combined arch/x86/mm/fault.c
index dffe8e4d3140efa283c30d75d7eb3552d8ca800c,6486ccec1b0eb21d43392b88af2152377d560468..c5437f2964ee6fd68c9dd23fef687b2004d9ae03
@@@ -30,6 -30,7 +30,7 @@@
  #include <asm/desc.h>                 /* store_idt(), ...             */
  #include <asm/cpu_entry_area.h>               /* exception stack              */
  #include <asm/pgtable_areas.h>                /* VMALLOC_START, ...           */
+ #include <asm/kvm_para.h>             /* kvm_handle_async_pf          */
  
  #define CREATE_TRACE_POINTS
  #include <asm/trace/exceptions.h>
@@@ -190,13 -191,16 +191,13 @@@ static inline pmd_t *vmalloc_sync_one(p
        return pmd_k;
  }
  
 -static void vmalloc_sync(void)
 +void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
  {
 -      unsigned long address;
 -
 -      if (SHARED_KERNEL_PMD)
 -              return;
 +      unsigned long addr;
  
 -      for (address = VMALLOC_START & PMD_MASK;
 -           address >= TASK_SIZE_MAX && address < VMALLOC_END;
 -           address += PMD_SIZE) {
 +      for (addr = start & PMD_MASK;
 +           addr >= TASK_SIZE_MAX && addr < VMALLOC_END;
 +           addr += PMD_SIZE) {
                struct page *page;
  
                spin_lock(&pgd_lock);
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
  
                        spin_lock(pgt_lock);
 -                      vmalloc_sync_one(page_address(page), address);
 +                      vmalloc_sync_one(page_address(page), addr);
                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
  }
  
 -void vmalloc_sync_mappings(void)
 -{
 -      vmalloc_sync();
 -}
 -
 -void vmalloc_sync_unmappings(void)
 -{
 -      vmalloc_sync();
 -}
 -
 -/*
 - * 32-bit:
 - *
 - *   Handle a fault on the vmalloc or module mapping area
 - */
 -static noinline int vmalloc_fault(unsigned long address)
 -{
 -      unsigned long pgd_paddr;
 -      pmd_t *pmd_k;
 -      pte_t *pte_k;
 -
 -      /* Make sure we are in vmalloc area: */
 -      if (!(address >= VMALLOC_START && address < VMALLOC_END))
 -              return -1;
 -
 -      /*
 -       * Synchronize this task's top level page-table
 -       * with the 'reference' page table.
 -       *
 -       * Do _not_ use "current" here. We might be inside
 -       * an interrupt in the middle of a task switch..
 -       */
 -      pgd_paddr = read_cr3_pa();
 -      pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
 -      if (!pmd_k)
 -              return -1;
 -
 -      if (pmd_large(*pmd_k))
 -              return 0;
 -
 -      pte_k = pte_offset_kernel(pmd_k, address);
 -      if (!pte_present(*pte_k))
 -              return -1;
 -
 -      return 0;
 -}
 -NOKPROBE_SYMBOL(vmalloc_fault);
 -
  /*
   * Did it hit the DOS screen memory VA from vm86 mode?
   */
@@@ -278,6 -330,96 +279,6 @@@ out
  
  #else /* CONFIG_X86_64: */
  
 -void vmalloc_sync_mappings(void)
 -{
 -      /*
 -       * 64-bit mappings might allocate new p4d/pud pages
 -       * that need to be propagated to all tasks' PGDs.
 -       */
 -      sync_global_pgds(VMALLOC_START & PGDIR_MASK, VMALLOC_END);
 -}
 -
 -void vmalloc_sync_unmappings(void)
 -{
 -      /*
 -       * Unmappings never allocate or free p4d/pud pages.
 -       * No work is required here.
 -       */
 -}
 -
 -/*
 - * 64-bit:
 - *
 - *   Handle a fault on the vmalloc area
 - */
 -static noinline int vmalloc_fault(unsigned long address)
 -{
 -      pgd_t *pgd, *pgd_k;
 -      p4d_t *p4d, *p4d_k;
 -      pud_t *pud;
 -      pmd_t *pmd;
 -      pte_t *pte;
 -
 -      /* Make sure we are in vmalloc area: */
 -      if (!(address >= VMALLOC_START && address < VMALLOC_END))
 -              return -1;
 -
 -      /*
 -       * Copy kernel mappings over when needed. This can also
 -       * happen within a race in page table update. In the later
 -       * case just flush:
 -       */
 -      pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
 -      pgd_k = pgd_offset_k(address);
 -      if (pgd_none(*pgd_k))
 -              return -1;
 -
 -      if (pgtable_l5_enabled()) {
 -              if (pgd_none(*pgd)) {
 -                      set_pgd(pgd, *pgd_k);
 -                      arch_flush_lazy_mmu_mode();
 -              } else {
 -                      BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
 -              }
 -      }
 -
 -      /* With 4-level paging, copying happens on the p4d level. */
 -      p4d = p4d_offset(pgd, address);
 -      p4d_k = p4d_offset(pgd_k, address);
 -      if (p4d_none(*p4d_k))
 -              return -1;
 -
 -      if (p4d_none(*p4d) && !pgtable_l5_enabled()) {
 -              set_p4d(p4d, *p4d_k);
 -              arch_flush_lazy_mmu_mode();
 -      } else {
 -              BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
 -      }
 -
 -      BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
 -
 -      pud = pud_offset(p4d, address);
 -      if (pud_none(*pud))
 -              return -1;
 -
 -      if (pud_large(*pud))
 -              return 0;
 -
 -      pmd = pmd_offset(pud, address);
 -      if (pmd_none(*pmd))
 -              return -1;
 -
 -      if (pmd_large(*pmd))
 -              return 0;
 -
 -      pte = pte_offset_kernel(pmd, address);
 -      if (!pte_present(*pte))
 -              return -1;
 -
 -      return 0;
 -}
 -NOKPROBE_SYMBOL(vmalloc_fault);
 -
  #ifdef CONFIG_CPU_SUP_AMD
  static const char errata93_warning[] =
  KERN_ERR 
@@@ -1116,6 -1258,29 +1117,6 @@@ do_kern_addr_fault(struct pt_regs *regs
         */
        WARN_ON_ONCE(hw_error_code & X86_PF_PK);
  
 -      /*
 -       * We can fault-in kernel-space virtual memory on-demand. The
 -       * 'reference' page table is init_mm.pgd.
 -       *
 -       * NOTE! We MUST NOT take any locks for this case. We may
 -       * be in an interrupt or a critical region, and should
 -       * only copy the information from the master page table,
 -       * nothing more.
 -       *
 -       * Before doing this on-demand faulting, ensure that the
 -       * fault is not any of the following:
 -       * 1. A fault on a PTE with a reserved bit set.
 -       * 2. A fault caused by a user-mode access.  (Do not demand-
 -       *    fault kernel memory due to user-mode accesses).
 -       * 3. A fault caused by a page-level protection violation.
 -       *    (A demand fault would be on a non-present page which
 -       *     would have X86_PF_PROT==0).
 -       */
 -      if (!(hw_error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
 -              if (vmalloc_fault(address) >= 0)
 -                      return;
 -      }
 -
        /* Was the fault spurious, caused by lazy TLB invalidation? */
        if (spurious_kernel_fault(hw_error_code, address))
                return;
@@@ -1359,6 -1524,24 +1360,24 @@@ do_page_fault(struct pt_regs *regs, uns
                unsigned long address)
  {
        prefetchw(&current->mm->mmap_sem);
+       /*
+        * KVM has two types of events that are, logically, interrupts, but
+        * are unfortunately delivered using the #PF vector.  These events are
+        * "you just accessed valid memory, but the host doesn't have it right
+        * now, so I'll put you to sleep if you continue" and "that memory
+        * you tried to access earlier is available now."
+        *
+        * We are relying on the interrupted context being sane (valid RSP,
+        * relevant locks not held, etc.), which is fine as long as the
+        * interrupted context had IF=1.  We are also relying on the KVM
+        * async pf type field and CR2 being read consistently instead of
+        * getting values from real and async page faults mixed up.
+        *
+        * Fingers crossed.
+        */
+       if (kvm_handle_async_pf(regs, (u32)address))
+               return;
        trace_page_fault_entries(regs, hw_error_code, address);
  
        if (unlikely(kmmio_fault(regs, address)))
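
The comment added to do_page_fault() above describes two KVM events that arrive through the #PF vector: "page not present", which parks the faulting context under a token, and "page ready", which releases whatever is waiting on that token, with kvm_handle_async_pf() filtering both out before ordinary fault handling. The stand-alone program below only models that token protocol; the array, the function names and the printf output are invented for illustration and are not the kernel's implementation.

/*
 * Stand-alone model of the async-PF token protocol: a "page not
 * present" event parks the faulting context under a token and a later
 * "page ready" event with the same token releases it.  Everything
 * here is invented for illustration; it is not the kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APF_MAX_WAITERS 8

static uint32_t waiting[APF_MAX_WAITERS];   /* parked tokens, 0 = free slot */

/* Host signalled: the page behind this access is not resident yet. */
static bool apf_page_not_present(uint32_t token)
{
        for (int i = 0; i < APF_MAX_WAITERS; i++) {
                if (!waiting[i]) {
                        waiting[i] = token;
                        printf("token %u parked, normal #PF handling skipped\n",
                               (unsigned)token);
                        return true;
                }
        }
        return false;   /* no slot left: would fall back to halting */
}

/* Host signalled: the page for this token is resident now. */
static void apf_page_ready(uint32_t token)
{
        for (int i = 0; i < APF_MAX_WAITERS; i++) {
                if (waiting[i] == token) {
                        waiting[i] = 0;
                        printf("token %u woken, access can be retried\n",
                               (unsigned)token);
                        return;
                }
        }
        printf("token %u had no waiter (wakeup arrived first)\n", (unsigned)token);
}

int main(void)
{
        apf_page_not_present(42);       /* guest touched paged-out memory */
        apf_page_ready(42);             /* host finished bringing it in */
        return 0;
}
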
diff --combined include/asm-generic/hyperv-tlfs.h
index 262fae9526b1f8549f81eeb705d12be4ddc24a2f,0000000000000000000000000000000000000000..e73a11850055c4de93b69c7e1d5559d779243232
mode 100644,000000..100644
--- /dev/null
@@@ -1,493 -1,0 +1,497 @@@
 +/* SPDX-License-Identifier: GPL-2.0 */
 +
 +/*
 + * This file contains definitions from Hyper-V Hypervisor Top-Level Functional
 + * Specification (TLFS):
 + * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
 + */
 +
 +#ifndef _ASM_GENERIC_HYPERV_TLFS_H
 +#define _ASM_GENERIC_HYPERV_TLFS_H
 +
 +#include <linux/types.h>
 +#include <linux/bits.h>
 +#include <linux/time64.h>
 +
 +/*
 + * While not explicitly listed in the TLFS, Hyper-V always runs with a page size
 + * of 4096. These definitions are used when communicating with Hyper-V using
 + * guest physical pages and guest physical page addresses, since the guest page
 + * size may not be 4096 on all architectures.
 + */
 +#define HV_HYP_PAGE_SHIFT      12
 +#define HV_HYP_PAGE_SIZE       BIT(HV_HYP_PAGE_SHIFT)
 +#define HV_HYP_PAGE_MASK       (~(HV_HYP_PAGE_SIZE - 1))
 +
 +/*
 + * Hyper-V provides two categories of flags relevant to guest VMs.  The
 + * "Features" category indicates specific functionality that is available
 + * to guests on this particular instance of Hyper-V. The "Features"
 + * are presented in four groups, each of which is 32 bits. The group A
 + * and B definitions are common across architectures and are listed here.
 + * However, not all flags are relevant on all architectures.
 + *
 + * Groups C and D vary across architectures and are listed in the
 + * architecture specific portion of hyperv-tlfs.h. Some of these flags exist
 + * on multiple architectures, but the bit positions are different so they
 + * cannot appear in the generic portion of hyperv-tlfs.h.
 + *
 + * The "Enlightenments" category provides recommendations on whether to use
 + * specific enlightenments that are available. The Enlighenments are a single
 + * group of 32 bits, but they vary across architectures and are listed in
 + * the architecture specific portion of hyperv-tlfs.h.
 + */
 +
 +/*
 + * Group A Features.
 + */
 +
 +/* VP Runtime register available */
 +#define HV_MSR_VP_RUNTIME_AVAILABLE           BIT(0)
 +/* Partition Reference Counter available*/
 +#define HV_MSR_TIME_REF_COUNT_AVAILABLE               BIT(1)
 +/* Basic SynIC register available */
 +#define HV_MSR_SYNIC_AVAILABLE                        BIT(2)
 +/* Synthetic Timer registers available */
 +#define HV_MSR_SYNTIMER_AVAILABLE             BIT(3)
 +/* Virtual APIC assist and VP assist page registers available */
 +#define HV_MSR_APIC_ACCESS_AVAILABLE          BIT(4)
 +/* Hypercall and Guest OS ID registers available*/
 +#define HV_MSR_HYPERCALL_AVAILABLE            BIT(5)
 +/* Access virtual processor index register available*/
 +#define HV_MSR_VP_INDEX_AVAILABLE             BIT(6)
 +/* Virtual system reset register available*/
 +#define HV_MSR_RESET_AVAILABLE                        BIT(7)
 +/* Access statistics page registers available */
 +#define HV_MSR_STAT_PAGES_AVAILABLE           BIT(8)
 +/* Partition reference TSC register is available */
 +#define HV_MSR_REFERENCE_TSC_AVAILABLE                BIT(9)
 +/* Partition Guest IDLE register is available */
 +#define HV_MSR_GUEST_IDLE_AVAILABLE           BIT(10)
 +/* Partition local APIC and TSC frequency registers available */
 +#define HV_ACCESS_FREQUENCY_MSRS              BIT(11)
 +/* AccessReenlightenmentControls privilege */
 +#define HV_ACCESS_REENLIGHTENMENT             BIT(13)
 +/* AccessTscInvariantControls privilege */
 +#define HV_ACCESS_TSC_INVARIANT                       BIT(15)
 +
 +/*
 + * Group B features.
 + */
 +#define HV_CREATE_PARTITIONS                  BIT(0)
 +#define HV_ACCESS_PARTITION_ID                        BIT(1)
 +#define HV_ACCESS_MEMORY_POOL                 BIT(2)
 +#define HV_ADJUST_MESSAGE_BUFFERS             BIT(3)
 +#define HV_POST_MESSAGES                      BIT(4)
 +#define HV_SIGNAL_EVENTS                      BIT(5)
 +#define HV_CREATE_PORT                                BIT(6)
 +#define HV_CONNECT_PORT                               BIT(7)
 +#define HV_ACCESS_STATS                               BIT(8)
 +#define HV_DEBUGGING                          BIT(11)
 +#define HV_CPU_POWER_MANAGEMENT                       BIT(12)
 +
 +
 +/*
 + * TSC page layout.
 + */
 +struct ms_hyperv_tsc_page {
 +      volatile u32 tsc_sequence;
 +      u32 reserved1;
 +      volatile u64 tsc_scale;
 +      volatile s64 tsc_offset;
 +} __packed;
 +
 +/*
 + * The guest OS needs to register the guest ID with the hypervisor.
 + * The guest ID is a 64 bit entity and the structure of this ID is
 + * specified in the Hyper-V specification:
 + *
 + * msdn.microsoft.com/en-us/library/windows/hardware/ff542653%28v=vs.85%29.aspx
 + *
 + * While the current guideline does not specify how Linux guest ID(s)
 + * need to be generated, our plan is to publish the guidelines for
 + * Linux and other guest operating systems that currently are hosted
 + * on Hyper-V. The implementation here conforms to this yet
 + * unpublished guidelines.
 + *
 + *
 + * Bit(s)
 + * 63 - Indicates if the OS is Open Source or not; 1 is Open Source
 + * 62:56 - Os Type; Linux is 0x100
 + * 55:48 - Distro specific identification
 + * 47:16 - Linux kernel version number
 + * 15:0  - Distro specific identification
 + *
 + *
 + */
 +
 +#define HV_LINUX_VENDOR_ID              0x8100
 +
 +/*
 + * Crash notification flags.
 + */
 +#define HV_CRASH_CTL_CRASH_NOTIFY_MSG         BIT_ULL(62)
 +#define HV_CRASH_CTL_CRASH_NOTIFY             BIT_ULL(63)
 +
 +/* Declare the various hypercall operations. */
 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE    0x0002
 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST     0x0003
 +#define HVCALL_NOTIFY_LONG_SPIN_WAIT          0x0008
 +#define HVCALL_SEND_IPI                               0x000b
 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX 0x0013
 +#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX  0x0014
 +#define HVCALL_SEND_IPI_EX                    0x0015
 +#define HVCALL_GET_VP_REGISTERS                       0x0050
 +#define HVCALL_SET_VP_REGISTERS                       0x0051
 +#define HVCALL_POST_MESSAGE                   0x005c
 +#define HVCALL_SIGNAL_EVENT                   0x005d
++#define HVCALL_POST_DEBUG_DATA                        0x0069
++#define HVCALL_RETRIEVE_DEBUG_DATA            0x006a
++#define HVCALL_RESET_DEBUG_SESSION            0x006b
 +#define HVCALL_RETARGET_INTERRUPT             0x007e
 +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
 +#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
 +
 +#define HV_FLUSH_ALL_PROCESSORS                       BIT(0)
 +#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES   BIT(1)
 +#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY     BIT(2)
 +#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT    BIT(3)
 +
 +enum HV_GENERIC_SET_FORMAT {
 +      HV_GENERIC_SET_SPARSE_4K,
 +      HV_GENERIC_SET_ALL,
 +};
 +
 +#define HV_PARTITION_ID_SELF          ((u64)-1)
 +#define HV_VP_INDEX_SELF              ((u32)-2)
 +
 +#define HV_HYPERCALL_RESULT_MASK      GENMASK_ULL(15, 0)
 +#define HV_HYPERCALL_FAST_BIT         BIT(16)
 +#define HV_HYPERCALL_VARHEAD_OFFSET   17
 +#define HV_HYPERCALL_REP_COMP_OFFSET  32
 +#define HV_HYPERCALL_REP_COMP_1               BIT_ULL(32)
 +#define HV_HYPERCALL_REP_COMP_MASK    GENMASK_ULL(43, 32)
 +#define HV_HYPERCALL_REP_START_OFFSET 48
 +#define HV_HYPERCALL_REP_START_MASK   GENMASK_ULL(59, 48)
 +
 +/* hypercall status code */
 +#define HV_STATUS_SUCCESS                     0
 +#define HV_STATUS_INVALID_HYPERCALL_CODE      2
 +#define HV_STATUS_INVALID_HYPERCALL_INPUT     3
 +#define HV_STATUS_INVALID_ALIGNMENT           4
 +#define HV_STATUS_INVALID_PARAMETER           5
++#define HV_STATUS_OPERATION_DENIED            8
 +#define HV_STATUS_INSUFFICIENT_MEMORY         11
 +#define HV_STATUS_INVALID_PORT_ID             17
 +#define HV_STATUS_INVALID_CONNECTION_ID               18
 +#define HV_STATUS_INSUFFICIENT_BUFFERS                19
 +
 +/*
 + * The Hyper-V TimeRefCount register and the TSC
 + * page provide a guest VM clock with 100ns tick rate
 + */
 +#define HV_CLOCK_HZ (NSEC_PER_SEC/100)
 +
 +/* Define the number of synthetic interrupt sources. */
 +#define HV_SYNIC_SINT_COUNT           (16)
 +/* Define the expected SynIC version. */
 +#define HV_SYNIC_VERSION_1            (0x1)
 +/* Valid SynIC vectors are 16-255. */
 +#define HV_SYNIC_FIRST_VALID_VECTOR   (16)
 +
 +#define HV_SYNIC_CONTROL_ENABLE               (1ULL << 0)
 +#define HV_SYNIC_SIMP_ENABLE          (1ULL << 0)
 +#define HV_SYNIC_SIEFP_ENABLE         (1ULL << 0)
 +#define HV_SYNIC_SINT_MASKED          (1ULL << 16)
 +#define HV_SYNIC_SINT_AUTO_EOI                (1ULL << 17)
 +#define HV_SYNIC_SINT_VECTOR_MASK     (0xFF)
 +
 +#define HV_SYNIC_STIMER_COUNT         (4)
 +
 +/* Define synthetic interrupt controller message constants. */
 +#define HV_MESSAGE_SIZE                       (256)
 +#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240)
 +#define HV_MESSAGE_PAYLOAD_QWORD_COUNT        (30)
 +
 +/* Define synthetic interrupt controller message flags. */
 +union hv_message_flags {
 +      __u8 asu8;
 +      struct {
 +              __u8 msg_pending:1;
 +              __u8 reserved:7;
 +      } __packed;
 +};
 +
 +/* Define port identifier type. */
 +union hv_port_id {
 +      __u32 asu32;
 +      struct {
 +              __u32 id:24;
 +              __u32 reserved:8;
 +      } __packed u;
 +};
 +
 +/* Define synthetic interrupt controller message header. */
 +struct hv_message_header {
 +      __u32 message_type;
 +      __u8 payload_size;
 +      union hv_message_flags message_flags;
 +      __u8 reserved[2];
 +      union {
 +              __u64 sender;
 +              union hv_port_id port;
 +      };
 +} __packed;
 +
 +/* Define synthetic interrupt controller message format. */
 +struct hv_message {
 +      struct hv_message_header header;
 +      union {
 +              __u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
 +      } u;
 +} __packed;
 +
 +/* Define the synthetic interrupt message page layout. */
 +struct hv_message_page {
 +      struct hv_message sint_message[HV_SYNIC_SINT_COUNT];
 +} __packed;
 +
 +/* Define timer message payload structure. */
 +struct hv_timer_message_payload {
 +      __u32 timer_index;
 +      __u32 reserved;
 +      __u64 expiration_time;  /* When the timer expired */
 +      __u64 delivery_time;    /* When the message was delivered */
 +} __packed;
 +
 +
 +/* Define synthetic interrupt controller flag constants. */
 +#define HV_EVENT_FLAGS_COUNT          (256 * 8)
 +#define HV_EVENT_FLAGS_LONG_COUNT     (256 / sizeof(unsigned long))
 +
 +/*
 + * Synthetic timer configuration.
 + */
 +union hv_stimer_config {
 +      u64 as_uint64;
 +      struct {
 +              u64 enable:1;
 +              u64 periodic:1;
 +              u64 lazy:1;
 +              u64 auto_enable:1;
 +              u64 apic_vector:8;
 +              u64 direct_mode:1;
 +              u64 reserved_z0:3;
 +              u64 sintx:4;
 +              u64 reserved_z1:44;
 +      } __packed;
 +};
 +
 +
 +/* Define the synthetic interrupt controller event flags format. */
 +union hv_synic_event_flags {
 +      unsigned long flags[HV_EVENT_FLAGS_LONG_COUNT];
 +};
 +
 +/* Define SynIC control register. */
 +union hv_synic_scontrol {
 +      u64 as_uint64;
 +      struct {
 +              u64 enable:1;
 +              u64 reserved:63;
 +      } __packed;
 +};
 +
 +/* Define synthetic interrupt source. */
 +union hv_synic_sint {
 +      u64 as_uint64;
 +      struct {
 +              u64 vector:8;
 +              u64 reserved1:8;
 +              u64 masked:1;
 +              u64 auto_eoi:1;
 +              u64 polling:1;
 +              u64 reserved2:45;
 +      } __packed;
 +};
 +
 +/* Define the format of the SIMP register */
 +union hv_synic_simp {
 +      u64 as_uint64;
 +      struct {
 +              u64 simp_enabled:1;
 +              u64 preserved:11;
 +              u64 base_simp_gpa:52;
 +      } __packed;
 +};
 +
 +/* Define the format of the SIEFP register */
 +union hv_synic_siefp {
 +      u64 as_uint64;
 +      struct {
 +              u64 siefp_enabled:1;
 +              u64 preserved:11;
 +              u64 base_siefp_gpa:52;
 +      } __packed;
 +};
 +
 +struct hv_vpset {
 +      u64 format;
 +      u64 valid_bank_mask;
 +      u64 bank_contents[];
 +} __packed;
 +
 +/* HvCallSendSyntheticClusterIpi hypercall */
 +struct hv_send_ipi {
 +      u32 vector;
 +      u32 reserved;
 +      u64 cpu_mask;
 +} __packed;
 +
 +/* HvCallSendSyntheticClusterIpiEx hypercall */
 +struct hv_send_ipi_ex {
 +      u32 vector;
 +      u32 reserved;
 +      struct hv_vpset vp_set;
 +} __packed;
 +
 +/* HvFlushGuestPhysicalAddressSpace hypercalls */
 +struct hv_guest_mapping_flush {
 +      u64 address_space;
 +      u64 flags;
 +} __packed;
 +
 +/*
 + *  HV_MAX_FLUSH_PAGES = "additional_pages" + 1. It's limited
 + *  by the bitwidth of "additional_pages" in union hv_gpa_page_range.
 + */
 +#define HV_MAX_FLUSH_PAGES (2048)
 +
 +/* HvFlushGuestPhysicalAddressList hypercall */
 +union hv_gpa_page_range {
 +      u64 address_space;
 +      struct {
 +              u64 additional_pages:11;
 +              u64 largepage:1;
 +              u64 basepfn:52;
 +      } page;
 +};
 +
 +/*
 + * All input flush parameters should be in single page. The max flush
 + * count is equal with how many entries of union hv_gpa_page_range can
 + * be populated into the input parameter page.
 + */
 +#define HV_MAX_FLUSH_REP_COUNT ((HV_HYP_PAGE_SIZE - 2 * sizeof(u64)) /        \
 +                              sizeof(union hv_gpa_page_range))
 +
 +struct hv_guest_mapping_flush_list {
 +      u64 address_space;
 +      u64 flags;
 +      union hv_gpa_page_range gpa_list[HV_MAX_FLUSH_REP_COUNT];
 +};
 +
 +/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
 +struct hv_tlb_flush {
 +      u64 address_space;
 +      u64 flags;
 +      u64 processor_mask;
 +      u64 gva_list[];
 +} __packed;
 +
 +/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
 +struct hv_tlb_flush_ex {
 +      u64 address_space;
 +      u64 flags;
 +      struct hv_vpset hv_vp_set;
 +      u64 gva_list[];
 +} __packed;
 +
 +/* HvRetargetDeviceInterrupt hypercall */
 +union hv_msi_entry {
 +      u64 as_uint64;
 +      struct {
 +              u32 address;
 +              u32 data;
 +      } __packed;
 +};
 +
 +struct hv_interrupt_entry {
 +      u32 source;                     /* 1 for MSI(-X) */
 +      u32 reserved1;
 +      union hv_msi_entry msi_entry;
 +} __packed;
 +
 +/*
 + * flags for hv_device_interrupt_target.flags
 + */
 +#define HV_DEVICE_INTERRUPT_TARGET_MULTICAST          1
 +#define HV_DEVICE_INTERRUPT_TARGET_PROCESSOR_SET      2
 +
 +struct hv_device_interrupt_target {
 +      u32 vector;
 +      u32 flags;
 +      union {
 +              u64 vp_mask;
 +              struct hv_vpset vp_set;
 +      };
 +} __packed;
 +
 +struct hv_retarget_device_interrupt {
 +      u64 partition_id;               /* use "self" */
 +      u64 device_id;
 +      struct hv_interrupt_entry int_entry;
 +      u64 reserved2;
 +      struct hv_device_interrupt_target int_target;
 +} __packed __aligned(8);
 +
 +
 +/* HvGetVpRegisters hypercall input with variable size reg name list*/
 +struct hv_get_vp_registers_input {
 +      struct {
 +              u64 partitionid;
 +              u32 vpindex;
 +              u8  inputvtl;
 +              u8  padding[3];
 +      } header;
 +      struct input {
 +              u32 name0;
 +              u32 name1;
 +      } element[];
 +} __packed;
 +
 +
 +/* HvGetVpRegisters returns an array of these output elements */
 +struct hv_get_vp_registers_output {
 +      union {
 +              struct {
 +                      u32 a;
 +                      u32 b;
 +                      u32 c;
 +                      u32 d;
 +              } as32 __packed;
 +              struct {
 +                      u64 low;
 +                      u64 high;
 +              } as64 __packed;
 +      };
 +};
 +
 +/* HvSetVpRegisters hypercall with variable size reg name/value list*/
 +struct hv_set_vp_registers_input {
 +      struct {
 +              u64 partitionid;
 +              u32 vpindex;
 +              u8  inputvtl;
 +              u8  padding[3];
 +      } header;
 +      struct {
 +              u32 name;
 +              u32 padding1;
 +              u64 padding2;
 +              u64 valuelow;
 +              u64 valuehigh;
 +      } element[];
 +} __packed;
 +
 +#endif
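
The HV_HYPERCALL_* definitions in the new header above fix the layout of the 64-bit hypercall input value and of the returned status word. The small program below composes a rep-hypercall control word and decodes a made-up result using those bit positions; the constants are copied locally so it builds stand-alone, and the helpers that actually issue hypercalls (hv_do_hypercall() and friends in the architecture-specific mshyperv.h) are deliberately not shown.

/*
 * Worked example of the hypercall input/output layout defined above:
 * call code in bits 15:0, the "fast" flag in bit 16, rep count at bit
 * 32, rep start index at bit 48; a result carries the status in bits
 * 15:0 and the completed rep count at bit 32.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST  0x0003
#define HV_HYPERCALL_RESULT_MASK           0xffffULL
#define HV_HYPERCALL_FAST_BIT              (1ULL << 16)
#define HV_HYPERCALL_REP_COMP_OFFSET       32
#define HV_HYPERCALL_REP_START_OFFSET      48
#define HV_STATUS_SUCCESS                  0

/* Compose the 64-bit control word for a rep hypercall. */
static uint64_t hv_control_word(uint16_t code, bool fast,
                                uint16_t rep_count, uint16_t rep_start)
{
        uint64_t control = code;

        if (fast)
                control |= HV_HYPERCALL_FAST_BIT;
        control |= (uint64_t)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
        control |= (uint64_t)rep_start << HV_HYPERCALL_REP_START_OFFSET;
        return control;
}

int main(void)
{
        /* Flush three GVA ranges, slow (memory-based) form, start at 0. */
        uint64_t control = hv_control_word(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST,
                                           false, 3, 0);
        /* A made-up result: success, all three reps completed. */
        uint64_t result = HV_STATUS_SUCCESS |
                          (3ULL << HV_HYPERCALL_REP_COMP_OFFSET);

        printf("control word: %#llx\n", (unsigned long long)control);
        printf("status %llu, reps completed %llu\n",
               (unsigned long long)(result & HV_HYPERCALL_RESULT_MASK),
               (unsigned long long)((result >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff));
        return 0;
}
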
diff --combined include/linux/sched.h
index 12938d438d695b43c338a60f5bfbd1abc2316189,658de6164853b6eecc92575b3a634e455eda674e..57a5ce9f33c5a89a3c6653bc21ce9d3b429d0289
@@@ -613,7 -613,7 +613,7 @@@ union rcu_special 
                u8                      blocked;
                u8                      need_qs;
                u8                      exp_hint; /* Hint for performance. */
 -              u8                      deferred_qs;
 +              u8                      need_mb; /* Readers need smp_mb(). */
        } b; /* Bits. */
        u32 s; /* Set of bits. */
  };
@@@ -654,7 -654,6 +654,7 @@@ struct task_struct 
  
  #ifdef CONFIG_SMP
        struct llist_node               wake_entry;
 +      unsigned int                    wake_entry_type;
        int                             on_cpu;
  #ifdef CONFIG_THREAD_INFO_IN_TASK
        /* Current CPU: */
        struct list_head                rcu_tasks_holdout_list;
  #endif /* #ifdef CONFIG_TASKS_RCU */
  
 +#ifdef CONFIG_TASKS_TRACE_RCU
 +      int                             trc_reader_nesting;
 +      int                             trc_ipi_to_cpu;
 +      union rcu_special               trc_reader_special;
 +      bool                            trc_reader_checked;
 +      struct list_head                trc_holdout_list;
 +#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
 +
        struct sched_info               sched_info;
  
        struct list_head                tasks;
        unsigned int                    hardirq_disable_event;
        int                             hardirqs_enabled;
        int                             hardirq_context;
+       u64                             hardirq_chain_key;
        unsigned long                   softirq_disable_ip;
        unsigned long                   softirq_enable_ip;
        unsigned int                    softirq_disable_event;
        unsigned long                   prev_lowest_stack;
  #endif
  
 +#ifdef CONFIG_X86_MCE
 +      u64                             mce_addr;
 +      u64                             mce_status;
 +      struct callback_head            mce_kill_me;
 +#endif
 +
        /*
         * New fields for task_struct should be added above here, so that
         * they are included in the randomized portion of task_struct.
@@@ -1496,8 -1482,7 +1497,8 @@@ extern struct pid *cad_pid
  #define PF_KSWAPD             0x00020000      /* I am kswapd */
  #define PF_MEMALLOC_NOFS      0x00040000      /* All allocation requests will inherit GFP_NOFS */
  #define PF_MEMALLOC_NOIO      0x00080000      /* All allocation requests will inherit GFP_NOIO */
 -#define PF_LESS_THROTTLE      0x00100000      /* Throttle me less: I clean memory */
 +#define PF_LOCAL_THROTTLE     0x00100000      /* Throttle writes only against the bdi I write to,
 +                                               * I am cleaning dirty pages from some other bdi. */
  #define PF_KTHREAD            0x00200000      /* I am a kernel thread */
  #define PF_RANDOMIZE          0x00400000      /* Randomize virtual address space */
  #define PF_SWAPWRITE          0x00800000      /* Allowed to write to swap */
@@@ -1731,15 -1716,7 +1732,15 @@@ extern char *__get_task_comm(char *to, 
  })
  
  #ifdef CONFIG_SMP
 -void scheduler_ipi(void);
 +static __always_inline void scheduler_ipi(void)
 +{
 +      /*
 +       * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
 +       * TIF_NEED_RESCHED remotely (for the first time) will also send
 +       * this IPI.
 +       */
 +      preempt_fold_need_resched();
 +}
  extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
  #else
  static inline void scheduler_ipi(void) { }
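
The inline scheduler_ipi() above works because a remote CPU that sets TIF_NEED_RESCHED also sends this IPI, and the receiving side only has to fold the flag into its preempt count: the next preemption-enable point then reschedules without a dedicated IPI handler. The toy model below illustrates that folding trick only; the marker bit, names and encoding are invented and do not match the kernel's PREEMPT_NEED_RESCHED handling.

/*
 * Toy model of folding a remote reschedule request into the preempt
 * count.  All names and the encoding are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define NEED_RESCHED_MARKER 0x80000000u

static unsigned int preempt_count = 1;  /* currently in a preempt-off region */
static bool tif_need_resched;           /* set by a remote CPU before its IPI */

/* What the (otherwise empty) reschedule IPI does on arrival. */
static void fold_need_resched(void)
{
        if (tif_need_resched)
                preempt_count |= NEED_RESCHED_MARKER;
}

/* Leaving the preempt-off region notices the folded request. */
static void preempt_enable_model(void)
{
        preempt_count--;
        if (preempt_count == NEED_RESCHED_MARKER)   /* count hit 0, marker set */
                printf("schedule(): folded resched request honoured\n");
}

int main(void)
{
        tif_need_resched = true;        /* remote CPU wants us to reschedule */
        fold_need_resched();            /* the IPI itself does no other work */
        preempt_enable_model();         /* next enable point reschedules */
        return 0;
}
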
diff --combined include/uapi/linux/kvm.h
index fdd632c833b4eaee6b48311086d608ba0f692c7f,6721eb563edad338efb0be06d09381030cc9bb54..4fdf30316582709be3a35ea5f047fcca2dbaaef6
@@@ -116,7 -116,7 +116,7 @@@ struct kvm_irq_level 
         * ACPI gsi notion of irq.
         * For IA-64 (APIC model) IOAPIC0: irq 0-23; IOAPIC1: irq 24-47..
         * For X86 (standard AT mode) PIC0/1: irq 0-15. IOAPIC0: 0-23..
 -       * For ARM: See Documentation/virt/kvm/api.txt
 +       * For ARM: See Documentation/virt/kvm/api.rst
         */
        union {
                __u32 irq;
@@@ -188,10 -188,13 +188,13 @@@ struct kvm_s390_cmma_log 
  struct kvm_hyperv_exit {
  #define KVM_EXIT_HYPERV_SYNIC          1
  #define KVM_EXIT_HYPERV_HCALL          2
+ #define KVM_EXIT_HYPERV_SYNDBG         3
        __u32 type;
+       __u32 pad1;
        union {
                struct {
                        __u32 msr;
+                       __u32 pad2;
                        __u64 control;
                        __u64 evt_page;
                        __u64 msg_page;
                        __u64 result;
                        __u64 params[2];
                } hcall;
+               struct {
+                       __u32 msr;
+                       __u32 pad2;
+                       __u64 control;
+                       __u64 status;
+                       __u64 send_page;
+                       __u64 recv_page;
+                       __u64 pending_page;
+               } syndbg;
        } u;
  };
  
@@@ -1017,6 -1029,8 +1029,8 @@@ struct kvm_ppc_resize_hpt 
  #define KVM_CAP_S390_VCPU_RESETS 179
  #define KVM_CAP_S390_PROTECTED 180
  #define KVM_CAP_PPC_SECURE_GUEST 181
+ #define KVM_CAP_HALT_POLL 182
+ #define KVM_CAP_ASYNC_PF_INT 183
  
  #ifdef KVM_CAP_IRQ_ROUTING
  
@@@ -1107,7 -1121,7 +1121,7 @@@ struct kvm_xen_hvm_config 
   *
   * KVM_IRQFD_FLAG_RESAMPLE indicates resamplefd is valid and specifies
   * the irqfd to operate in resampling mode for level triggered interrupt
 - * emulation.  See Documentation/virt/kvm/api.txt.
 + * emulation.  See Documentation/virt/kvm/api.rst.
   */
  #define KVM_IRQFD_FLAG_RESAMPLE (1 << 1)
  
diff --combined kernel/exit.c
index c81805a6e03b4f49910d09f265843c9be204acf5,9f4beff080b08b9de2d87c267f97e5ef62bc5b8b..c300253a7b8ea9ec5b346387fd3aaa1d3d331354
@@@ -228,8 -228,9 +228,9 @@@ repeat
                goto repeat;
  }
  
- void rcuwait_wake_up(struct rcuwait *w)
+ int rcuwait_wake_up(struct rcuwait *w)
  {
+       int ret = 0;
        struct task_struct *task;
  
        rcu_read_lock();
        /*
         * Order condition vs @task, such that everything prior to the load
         * of @task is visible. This is the condition as to why the user called
-        * rcuwait_trywake() in the first place. Pairs with set_current_state()
+        * rcuwait_wake() in the first place. Pairs with set_current_state()
         * barrier (A) in rcuwait_wait_event().
         *
         *    WAIT                WAKE
  
        task = rcu_dereference(w->task);
        if (task)
-               wake_up_process(task);
+               ret = wake_up_process(task);
        rcu_read_unlock();
+       return ret;
  }
  EXPORT_SYMBOL_GPL(rcuwait_wake_up);
  
@@@ -708,12 -711,8 +711,12 @@@ void __noreturn do_exit(long code
        struct task_struct *tsk = current;
        int group_dead;
  
 -      profile_task_exit(tsk);
 -      kcov_task_exit(tsk);
 +      /*
 +       * We can get here from a kernel oops, sometimes with preemption off.
 +       * Start by checking for critical errors.
 +       * Then fix up important state like USER_DS and preemption.
 +       * Then do everything else.
 +       */
  
        WARN_ON(blk_needs_flush_plug(tsk));
  
         */
        set_fs(USER_DS);
  
 +      if (unlikely(in_atomic())) {
 +              pr_info("note: %s[%d] exited with preempt_count %d\n",
 +                      current->comm, task_pid_nr(current),
 +                      preempt_count());
 +              preempt_count_set(PREEMPT_ENABLED);
 +      }
 +
 +      profile_task_exit(tsk);
 +      kcov_task_exit(tsk);
 +
        ptrace_event(PTRACE_EVENT_EXIT, code);
  
        validate_creds_for_do_exit(tsk);
  
        exit_signals(tsk);  /* sets PF_EXITING */
  
 -      if (unlikely(in_atomic())) {
 -              pr_info("note: %s[%d] exited with preempt_count %d\n",
 -                      current->comm, task_pid_nr(current),
 -                      preempt_count());
 -              preempt_count_set(PREEMPT_ENABLED);
 -      }
 -
        /* sync mm's RSS info before statistics gathering */
        if (tsk->mm)
                sync_mm_rss(tsk->mm);
@@@ -1565,7 -1561,7 +1568,7 @@@ SYSCALL_DEFINE5(waitid, int, which, pid
        if (!infop)
                return err;
  
 -      if (!user_access_begin(infop, sizeof(*infop)))
 +      if (!user_write_access_begin(infop, sizeof(*infop)))
                return -EFAULT;
  
        unsafe_put_user(signo, &infop->si_signo, Efault);
        unsafe_put_user(info.pid, &infop->si_pid, Efault);
        unsafe_put_user(info.uid, &infop->si_uid, Efault);
        unsafe_put_user(info.status, &infop->si_status, Efault);
 -      user_access_end();
 +      user_write_access_end();
        return err;
  Efault:
 -      user_access_end();
 +      user_write_access_end();
        return -EFAULT;
  }
  
@@@ -1692,7 -1688,7 +1695,7 @@@ COMPAT_SYSCALL_DEFINE5(waitid
        if (!infop)
                return err;
  
 -      if (!user_access_begin(infop, sizeof(*infop)))
 +      if (!user_write_access_begin(infop, sizeof(*infop)))
                return -EFAULT;
  
        unsafe_put_user(signo, &infop->si_signo, Efault);
        unsafe_put_user(info.pid, &infop->si_pid, Efault);
        unsafe_put_user(info.uid, &infop->si_uid, Efault);
        unsafe_put_user(info.status, &infop->si_status, Efault);
 -      user_access_end();
 +      user_write_access_end();
        return err;
  Efault:
 -      user_access_end();
 +      user_write_access_end();
        return -EFAULT;
  }
  #endif
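
rcuwait_wake_up() above now propagates the return value of wake_up_process(), so a caller can tell whether a sleeping task was actually taken off the wait. The kernel-context sketch below shows one plausible use of that return value; the stats structure and function name are invented, and only rcuwait_wake_up() itself comes from the change above.

/*
 * Kernel-context sketch (not a stand-alone program): with the int
 * return, a waker can account whether it really woke a sleeper.
 */
#include <linux/rcuwait.h>
#include <linux/atomic.h>

struct wake_stats {
        atomic64_t delivered;   /* a sleeping task was woken */
        atomic64_t spurious;    /* nobody was waiting at that moment */
};

static void kick_waiter(struct rcuwait *wait, struct wake_stats *stats)
{
        if (rcuwait_wake_up(wait))
                atomic64_inc(&stats->delivered);
        else
                atomic64_inc(&stats->spurious);
}
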
diff --combined kernel/locking/lockdep.c
index dd3cc0854c32ea1f78f59a9d2befbca2da5906c8,9ccd675a8b5a026be76a54f4c8305239d1ab4985..4c057dd8e93b224e65dcde606091925cd8773538
@@@ -393,6 -393,25 +393,6 @@@ void lockdep_init_task(struct task_stru
        task->lockdep_recursion = 0;
  }
  
 -/*
 - * Split the recrursion counter in two to readily detect 'off' vs recursion.
 - */
 -#define LOCKDEP_RECURSION_BITS        16
 -#define LOCKDEP_OFF           (1U << LOCKDEP_RECURSION_BITS)
 -#define LOCKDEP_RECURSION_MASK        (LOCKDEP_OFF - 1)
 -
 -void lockdep_off(void)
 -{
 -      current->lockdep_recursion += LOCKDEP_OFF;
 -}
 -EXPORT_SYMBOL(lockdep_off);
 -
 -void lockdep_on(void)
 -{
 -      current->lockdep_recursion -= LOCKDEP_OFF;
 -}
 -EXPORT_SYMBOL(lockdep_on);
 -
  static inline void lockdep_recursion_finish(void)
  {
        if (WARN_ON_ONCE(--current->lockdep_recursion))
@@@ -470,7 -489,7 +470,7 @@@ struct lock_trace 
        struct hlist_node       hash_entry;
        u32                     hash;
        u32                     nr_entries;
 -      unsigned long           entries[0] __aligned(sizeof(unsigned long));
 +      unsigned long           entries[] __aligned(sizeof(unsigned long));
  };
  #define LOCK_TRACE_SIZE_IN_LONGS                              \
        (sizeof(struct lock_trace) / sizeof(unsigned long))
@@@ -3616,13 -3635,10 +3616,10 @@@ mark_held_locks(struct task_struct *cur
  /*
   * Hardirqs will be enabled:
   */
- static void __trace_hardirqs_on_caller(unsigned long ip)
+ static void __trace_hardirqs_on_caller(void)
  {
        struct task_struct *curr = current;
  
-       /* we'll do an OFF -> ON transition: */
-       curr->hardirqs_enabled = 1;
        /*
         * We are going to turn hardirqs on, so set the
         * usage bit for all held locks:
         * this bit from being set before)
         */
        if (curr->softirqs_enabled)
-               if (!mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ))
-                       return;
-       curr->hardirq_enable_ip = ip;
-       curr->hardirq_enable_event = ++curr->irq_events;
-       debug_atomic_inc(hardirqs_on_events);
+               mark_held_locks(curr, LOCK_ENABLED_SOFTIRQ);
  }
  
- void lockdep_hardirqs_on(unsigned long ip)
+ /**
+  * lockdep_hardirqs_on_prepare - Prepare for enabling interrupts
+  * @ip:               Caller address
+  *
+  * Invoked before a possible transition to RCU idle from exit to user or
+  * guest mode. This ensures that all RCU operations are done before RCU
+  * stops watching. After the RCU transition lockdep_hardirqs_on() has to be
+  * invoked to set the final state.
+  */
+ void lockdep_hardirqs_on_prepare(unsigned long ip)
  {
        if (unlikely(!debug_locks || current->lockdep_recursion))
                return;
        if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
                return;
  
+       current->hardirq_chain_key = current->curr_chain_key;
        current->lockdep_recursion++;
-       __trace_hardirqs_on_caller(ip);
+       __trace_hardirqs_on_caller();
        lockdep_recursion_finish();
  }
- NOKPROBE_SYMBOL(lockdep_hardirqs_on);
+ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
+ void noinstr lockdep_hardirqs_on(unsigned long ip)
+ {
+       struct task_struct *curr = current;
+       if (unlikely(!debug_locks || curr->lockdep_recursion))
+               return;
+       if (curr->hardirqs_enabled) {
+               /*
+                * Neither irq nor preemption are disabled here
+                * so this is racy by nature but losing one hit
+                * in a stat is not a big deal.
+                */
+               __debug_atomic_inc(redundant_hardirqs_on);
+               return;
+       }
+       /*
+        * We're enabling irqs and according to our state above irqs weren't
+        * already enabled, yet we find the hardware thinks they are in fact
+        * enabled.. someone messed up their IRQ state tracing.
+        */
+       if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+               return;
+       /*
+        * Ensure the lock stack remained unchanged between
+        * lockdep_hardirqs_on_prepare() and lockdep_hardirqs_on().
+        */
+       DEBUG_LOCKS_WARN_ON(current->hardirq_chain_key !=
+                           current->curr_chain_key);
+       /* we'll do an OFF -> ON transition: */
+       curr->hardirqs_enabled = 1;
+       curr->hardirq_enable_ip = ip;
+       curr->hardirq_enable_event = ++curr->irq_events;
+       debug_atomic_inc(hardirqs_on_events);
+ }
+ EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
  
  /*
   * Hardirqs were disabled:
   */
- void lockdep_hardirqs_off(unsigned long ip)
+ void noinstr lockdep_hardirqs_off(unsigned long ip)
  {
        struct task_struct *curr = current;
  
-       if (unlikely(!debug_locks || current->lockdep_recursion))
+       if (unlikely(!debug_locks || curr->lockdep_recursion))
                return;
  
        /*
                curr->hardirq_disable_ip = ip;
                curr->hardirq_disable_event = ++curr->irq_events;
                debug_atomic_inc(hardirqs_off_events);
-       } else
+       } else {
                debug_atomic_inc(redundant_hardirqs_off);
+       }
  }
- NOKPROBE_SYMBOL(lockdep_hardirqs_off);
+ EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
  
  /*
   * Softirqs will be enabled:
@@@ -4389,8 -4452,8 +4433,8 @@@ static void print_unlock_imbalance_bug(
        dump_stack();
  }
  
- static int match_held_lock(const struct held_lock *hlock,
-                                       const struct lockdep_map *lock)
+ static noinstr int match_held_lock(const struct held_lock *hlock,
+                                  const struct lockdep_map *lock)
  {
        if (hlock->instance == lock)
                return 1;
@@@ -4677,7 -4740,7 +4721,7 @@@ __lock_release(struct lockdep_map *lock
        return 0;
  }
  
- static nokprobe_inline
+ static __always_inline
  int __lock_is_held(const struct lockdep_map *lock, int read)
  {
        struct task_struct *curr = current;
@@@ -4937,7 -5000,7 +4981,7 @@@ void lock_release(struct lockdep_map *l
  }
  EXPORT_SYMBOL_GPL(lock_release);
  
- int lock_is_held_type(const struct lockdep_map *lock, int read)
+ noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
  {
        unsigned long flags;
        int ret = 0;
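
The kernel-doc added above splits interrupt-enable tracking into lockdep_hardirqs_on_prepare(), which may still be instrumented and use RCU, and the noinstr lockdep_hardirqs_on(), which performs the final state flip only after verifying that the lock chain key did not change in between. The sketch below illustrates that calling order for a hypothetical interrupt-enable path; it is not the real entry code.

/*
 * Kernel-context sketch of the two-phase ordering only; the
 * surrounding path is hypothetical.
 */
#include <linux/kernel.h>
#include <linux/irqflags.h>

static void example_irq_enable_path(void)
{
        /*
         * Phase 1: instrumentation and RCU are still fine here.  This
         * marks the held locks and snapshots curr_chain_key into
         * hardirq_chain_key for the later consistency check.
         */
        lockdep_hardirqs_on_prepare(_THIS_IP_);

        /* ... last traceable work; RCU may stop watching after this ... */

        /*
         * Phase 2: noinstr territory.  Re-checks that the chain key is
         * unchanged and only then flips the hardirqs-enabled state,
         * immediately before interrupts are really enabled.
         */
        lockdep_hardirqs_on(_THIS_IP_);
        raw_local_irq_enable();
}
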