Git Repo - linux.git/commitdiff
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64...
authorLinus Torvalds <[email protected]>
Tue, 7 May 2019 00:54:22 +0000 (17:54 -0700)
committerLinus Torvalds <[email protected]>
Tue, 7 May 2019 00:54:22 +0000 (17:54 -0700)
Pull arm64 updates from Will Deacon:
 "Mostly just incremental improvements here:

   - Introduce AT_HWCAP2 for advertising CPU features to userspace

   - Expose SVE2 availability to userspace

   - Support for "data cache clean to point of deep persistence" (DC PODP)

   - Honour "mitigations=off" on the cmdline and advertise status via
     sysfs

   - CPU timer erratum workaround (Neoverse-N1 #1188873)

   - Introduce perf PMU driver for the SMMUv3 performance counters

   - Add config option to disable the kuser helpers page for AArch32 tasks

   - Futex modifications to ensure liveness under contention

   - Rework debug exception handling to separate kernel and user
     handlers

   - Non-critical fixes and cleanup"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (92 commits)
  Documentation: Add ARM64 to kernel-parameters.rst
  arm64/speculation: Support 'mitigations=' cmdline option
  arm64: ssbs: Don't treat CPUs with SSBS as unaffected by SSB
  arm64: enable generic CPU vulnerabilites support
  arm64: add sysfs vulnerability show for speculative store bypass
  arm64: Fix size of __early_cpu_boot_status
  clocksource/arm_arch_timer: Use arch_timer_read_counter to access stable counters
  clocksource/arm_arch_timer: Remove use of workaround static key
  clocksource/arm_arch_timer: Drop use of static key in arch_timer_reg_read_stable
  clocksource/arm_arch_timer: Direcly assign set_next_event workaround
  arm64: Use arch_timer_read_counter instead of arch_counter_get_cntvct
  watchdog/sbsa: Use arch_timer_read_counter instead of arch_counter_get_cntvct
  ARM: vdso: Remove dependency with the arch_timer driver internals
  arm64: Apply ARM64_ERRATUM_1188873 to Neoverse-N1
  arm64: Add part number for Neoverse N1
  arm64: Make ARM64_ERRATUM_1188873 depend on COMPAT
  arm64: Restrict ARM64_ERRATUM_1188873 mitigation to AArch32
  arm64: mm: Remove pte_unmap_nested()
  arm64: Fix compiler warning from pte_unmap() with -Wunused-but-set-variable
  arm64: compat: Reduce address limit for 64K pages
  ...

1  2 
Documentation/admin-guide/kernel-parameters.txt
arch/arm64/Kconfig
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/tlb.h
arch/arm64/kernel/traps.c
arch/arm64/mm/init.c
drivers/clocksource/arm_arch_timer.c
drivers/firmware/efi/libstub/Makefile
mm/kasan/Makefile
virt/kvm/arm/mmu.c

index 3c0646e28488c1349aa53b6415e42693862ed68b,ce226f7ee5663d9038a513d98ecc526c5dde2fcf..fd03e2b629bbcfda55847e0cddd973bdd7b3a708
                        upon panic. This parameter reserves the physical
                        memory region [offset, offset + size] for that kernel
                        image. If '@offset' is omitted, then a suitable offset
 -                      is selected automatically. Check
 -                      Documentation/kdump/kdump.txt for further details.
 +                      is selected automatically.
 +                      [KNL, x86_64] select a region under 4G first, and
 +                      fall back to reserve region above 4G when '@offset'
 +                      hasn't been specified.
 +                      See Documentation/kdump/kdump.txt for further details.
  
        crashkernel=range1:size1[,range2:size2,...][@offset]
                        [KNL] Same as above, but depends on the memory
                        http://repo.or.cz/w/linux-2.6/mini2440.git
  
        mitigations=
-                       [X86,PPC,S390] Control optional mitigations for CPU
-                       vulnerabilities.  This is a set of curated,
+                       [X86,PPC,S390,ARM64] Control optional mitigations for
+                       CPU vulnerabilities.  This is a set of curated,
                        arch-independent options, each of which is an
                        aggregation of existing arch-specific options.
  
                                improves system performance, but it may also
                                expose users to several CPU vulnerabilities.
                                Equivalent to: nopti [X86,PPC]
+                                              kpti=0 [ARM64]
                                               nospectre_v1 [PPC]
                                               nobp=0 [S390]
-                                              nospectre_v2 [X86,PPC,S390]
+                                              nospectre_v2 [X86,PPC,S390,ARM64]
                                               spectre_v2_user=off [X86]
                                               spec_store_bypass_disable=off [X86,PPC]
+                                              ssbd=force-off [ARM64]
                                               l1tf=off [X86]
  
                        auto (default)
                        check bypass). With this option data leaks are possible
                        in the system.
  
-       nospectre_v2    [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
-                       (indirect branch prediction) vulnerability. System may
-                       allow data leaks with this option, which is equivalent
-                       to spectre_v2=off.
+       nospectre_v2    [X86,PPC_FSL_BOOK3E,ARM64] Disable all mitigations for
+                       the Spectre variant 2 (indirect branch prediction)
+                       vulnerability. System may allow data leaks with this
+                       option.
  
        nospec_store_bypass_disable
                        [HW] Disable all mitigations for the Speculative Store Bypass vulnerability
                                bridges without forcing it upstream. Note:
                                this removes isolation between devices and
                                may put more devices in an IOMMU group.
 +              force_floating  [S390] Force usage of floating interrupts.
 +              nomio           [S390] Do not use MIO instructions.
  
        pcie_aspm=      [PCIE] Forcibly enable or disable PCIe Active State Power
                        Management.
                                see CONFIG_RAS_CEC help text.
  
        rcu_nocbs=      [KNL]
 -                      The argument is a cpu list, as described above.
 +                      The argument is a cpu list, as described above,
 +                      except that the string "all" can be used to
 +                      specify every CPU on the system.
  
                        In kernels built with CONFIG_RCU_NOCB_CPU=y, set
                        the specified list of CPUs to be no-callback CPUs.
                        [x86] unstable: mark the TSC clocksource as unstable, this
                        marks the TSC unconditionally unstable at bootup and
                        avoids any further wobbles once the TSC watchdog notices.
 +                      [x86] nowatchdog: disable clocksource watchdog. Used
 +                      in situations with strict latency requirements (where
 +                      interruptions from clocksource watchdog are not
 +                      acceptable).
  
        turbografx.map[2|3]=    [HW,JOY]
                        TurboGraFX parallel port interface
diff --combined arch/arm64/Kconfig
index d81adca1b04dbaa102dc06dc2f253a93a3184063,1c0cb5131c2ab5e75f1411b42eadad32b3d9e19d..df350f4e1e7ac479f03cf8b48b889a5fc84232ef
@@@ -90,6 -90,7 +90,7 @@@ config ARM6
        select GENERIC_CLOCKEVENTS
        select GENERIC_CLOCKEVENTS_BROADCAST
        select GENERIC_CPU_AUTOPROBE
+       select GENERIC_CPU_VULNERABILITIES
        select GENERIC_EARLY_IOREMAP
        select GENERIC_IDLE_POLL_SETUP
        select GENERIC_IRQ_MULTI_HANDLER
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
        select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_FUNCTION_ARG_ACCESS_API
        select HAVE_RCU_TABLE_FREE
 -      select HAVE_RCU_TABLE_INVALIDATE
        select HAVE_RSEQ
        select HAVE_STACKPROTECTOR
        select HAVE_SYSCALL_TRACEPOINTS
@@@ -236,6 -239,9 +238,6 @@@ config LOCKDEP_SUPPOR
  config TRACE_IRQFLAGS_SUPPORT
        def_bool y
  
 -config RWSEM_XCHGADD_ALGORITHM
 -      def_bool y
 -
  config GENERIC_BUG
        def_bool y
        depends on BUG
@@@ -293,7 -299,7 +295,7 @@@ menu "Kernel Features
  menu "ARM errata workarounds via the alternatives framework"
  
  config ARM64_WORKAROUND_CLEAN_CACHE
-       def_bool n
+       bool
  
  config ARM64_ERRATUM_826319
        bool "Cortex-A53: 826319: System might deadlock if a write cannot complete until read data is accepted"
@@@ -460,26 -466,28 +462,28 @@@ config ARM64_ERRATUM_102471
        bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
        default y
        help
-         This option adds work around for Arm Cortex-A55 Erratum 1024718.
+         This option adds a workaround for ARM Cortex-A55 Erratum 1024718.
  
          Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause incorrect
          update of the hardware dirty bit when the DBM/AP bits are updated
-         without a break-before-make. The work around is to disable the usage
+         without a break-before-make. The workaround is to disable the usage
          of hardware DBM locally on the affected cores. CPUs not affected by
-         erratum will continue to use the feature.
+         this erratum will continue to use the feature.
  
          If unsure, say Y.
  
  config ARM64_ERRATUM_1188873
-       bool "Cortex-A76: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
+       bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
        default y
+       depends on COMPAT
        select ARM_ARCH_TIMER_OOL_WORKAROUND
        help
-         This option adds work arounds for ARM Cortex-A76 erratum 1188873
+         This option adds a workaround for ARM Cortex-A76/Neoverse-N1
+         erratum 1188873.
  
-         Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could cause
-         register corruption when accessing the timer registers from
-         AArch32 userspace.
+         Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could
+         cause register corruption when accessing the timer registers
+         from AArch32 userspace.
  
          If unsure, say Y.
  
@@@ -487,7 -495,7 +491,7 @@@ config ARM64_ERRATUM_116552
        bool "Cortex-A76: Speculative AT instruction using out-of-context translation regime could cause subsequent request to generate an incorrect translation"
        default y
        help
-         This option adds work arounds for ARM Cortex-A76 erratum 1165522
+         This option adds a workaround for ARM Cortex-A76 erratum 1165522.
  
          Affected Cortex-A76 cores (r0p0, r1p0, r2p0) could end-up with
          corrupted TLBs by speculating an AT instruction during a guest
@@@ -500,7 -508,7 +504,7 @@@ config ARM64_ERRATUM_128680
        default y
        select ARM64_WORKAROUND_REPEAT_TLBI
        help
-         This option adds workaround for ARM Cortex-A76 erratum 1286807
+         This option adds a workaround for ARM Cortex-A76 erratum 1286807.
  
          On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual
          address for a cacheable mapping of a location is being
@@@ -517,10 -525,10 +521,10 @@@ config CAVIUM_ERRATUM_2237
        bool "Cavium erratum 22375, 24313"
        default y
        help
-         Enable workaround for erratum 22375, 24313.
+         Enable workaround for errata 22375 and 24313.
  
          This implements two gicv3-its errata workarounds for ThunderX. Both
-         with small impact affecting only ITS table allocation.
+         with small impact affecting only ITS table allocation.
  
            erratum 22375: only alloc 8MB table size
            erratum 24313: ignore memory access type
@@@ -584,9 -592,6 +588,6 @@@ config QCOM_FALKOR_ERRATUM_100
  
  config ARM64_WORKAROUND_REPEAT_TLBI
        bool
-       help
-         Enable the repeat TLBI workaround for Falkor erratum 1009 and
-         Cortex-A76 erratum 1286807.
  
  config QCOM_FALKOR_ERRATUM_1009
        bool "Falkor E1009: Prematurely complete a DSB after a TLBI"
@@@ -622,7 -627,7 +623,7 @@@ config HISILICON_ERRATUM_16160080
        bool "Hip07 161600802: Erroneous redistributor VLPI base"
        default y
        help
-         The HiSilicon Hip07 SoC usees the wrong redistributor base
+         The HiSilicon Hip07 SoC uses the wrong redistributor base
          when issued ITS commands such as VMOVP and VMAPP, and requires
          a 128kB offset to be applied to the target address in this commands.
  
@@@ -642,7 -647,7 +643,7 @@@ config FUJITSU_ERRATUM_01000
        bool "Fujitsu-A64FX erratum E#010001: Undefined fault may occur wrongly"
        default y
        help
-         This option adds workaround for Fujitsu-A64FX erratum E#010001.
+         This option adds workaround for Fujitsu-A64FX erratum E#010001.
          On some variants of the Fujitsu-A64FX cores ver(1.0, 1.1), memory
          accesses may cause undefined fault (Data abort, DFSC=0b111111).
          This fault occurs under a specific hardware condition when a
          case-4  TTBR1_EL2 with TCR_EL2.NFD1 == 1.
  
          The workaround is to ensure these bits are clear in TCR_ELx.
-         The workaround only affect the Fujitsu-A64FX.
+         The workaround only affects the Fujitsu-A64FX.
  
          If unsure, say Y.
  
@@@ -885,6 -890,9 +886,9 @@@ config ARCH_WANT_HUGE_PMD_SHAR
  config ARCH_HAS_CACHE_LINE_SIZE
        def_bool y
  
+ config ARCH_ENABLE_SPLIT_PMD_PTLOCK
+       def_bool y if PGTABLE_LEVELS > 2
  config SECCOMP
        bool "Enable seccomp to safely compute untrusted bytecode"
        ---help---
@@@ -1074,9 -1082,65 +1078,65 @@@ config RODATA_FULL_DEFAULT_ENABLE
          This requires the linear region to be mapped down to pages,
          which may adversely affect performance in some cases.
  
+ config ARM64_SW_TTBR0_PAN
+       bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+       help
+         Enabling this option prevents the kernel from accessing
+         user-space memory directly by pointing TTBR0_EL1 to a reserved
+         zeroed area and reserved ASID. The user access routines
+         restore the valid TTBR0_EL1 temporarily.
+ menuconfig COMPAT
+       bool "Kernel support for 32-bit EL0"
+       depends on ARM64_4K_PAGES || EXPERT
+       select COMPAT_BINFMT_ELF if BINFMT_ELF
+       select HAVE_UID16
+       select OLD_SIGSUSPEND3
+       select COMPAT_OLD_SIGACTION
+       help
+         This option enables support for a 32-bit EL0 running under a 64-bit
+         kernel at EL1. AArch32-specific components such as system calls,
+         the user helper functions, VFP support and the ptrace interface are
+         handled appropriately by the kernel.
+         If you use a page size other than 4KB (i.e, 16KB or 64KB), please be aware
+         that you will only be able to execute AArch32 binaries that were compiled
+         with page size aligned segments.
+         If you want to execute 32-bit userspace applications, say Y.
+ if COMPAT
+ config KUSER_HELPERS
+       bool "Enable kuser helpers page for 32 bit applications"
+       default y
+       help
+         Warning: disabling this option may break 32-bit user programs.
+         Provide kuser helpers to compat tasks. The kernel provides
+         helper code to userspace in read only form at a fixed location
+         to allow userspace to be independent of the CPU type fitted to
+         the system. This permits binaries to be run on ARMv4 through
+         to ARMv8 without modification.
+         See Documentation/arm/kernel_user_helpers.txt for details.
+         However, the fixed address nature of these helpers can be used
+         by ROP (return orientated programming) authors when creating
+         exploits.
+         If all of the binaries and libraries which run on your platform
+         are built specifically for your platform, and make no use of
+         these helpers, then you can turn this option off to hinder
+         such exploits. However, in that case, if a binary or library
+         relying on those helpers is run, it will not function correctly.
+         Say N here only if you are absolutely certain that you do not
+         need these helpers; otherwise, the safe option is to say Y.
  menuconfig ARMV8_DEPRECATED
        bool "Emulate deprecated/obsolete ARMv8 instructions"
-       depends on COMPAT
        depends on SYSCTL
        help
          Legacy software support may require certain instructions
@@@ -1142,13 -1206,7 +1202,7 @@@ config SETEND_EMULATIO
          If unsure, say Y
  endif
  
- config ARM64_SW_TTBR0_PAN
-       bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
-       help
-         Enabling this option prevents the kernel from accessing
-         user-space memory directly by pointing TTBR0_EL1 to a reserved
-         zeroed area and reserved ASID. The user access routines
-         restore the valid TTBR0_EL1 temporarily.
+ endif
  
  menu "ARMv8.1 architectural features"
  
@@@ -1314,6 -1372,9 +1368,9 @@@ config ARM64_SV
  
          To enable use of this extension on CPUs that implement it, say Y.
  
+         On CPUs that support the SVE2 extensions, this option will enable
+         those too.
          Note that for architectural reasons, firmware _must_ implement SVE
          support when running on SVE capable hardware.  The required support
          is present in:
@@@ -1347,7 -1408,7 +1404,7 @@@ config ARM64_PSEUDO_NM
        help
          Adds support for mimicking Non-Maskable Interrupts through the use of
          GIC interrupt priority. This support requires version 3 or later of
-         Arm GIC.
+         ARM GIC.
  
          This high priority configuration for interrupts needs to be
          explicitly enabled by setting the kernel parameter
@@@ -1471,25 -1532,6 +1528,6 @@@ config DM
  
  endmenu
  
- config COMPAT
-       bool "Kernel support for 32-bit EL0"
-       depends on ARM64_4K_PAGES || EXPERT
-       select COMPAT_BINFMT_ELF if BINFMT_ELF
-       select HAVE_UID16
-       select OLD_SIGSUSPEND3
-       select COMPAT_OLD_SIGACTION
-       help
-         This option enables support for a 32-bit EL0 running under a 64-bit
-         kernel at EL1. AArch32-specific components such as system calls,
-         the user helper functions, VFP support and the ptrace interface are
-         handled appropriately by the kernel.
-         If you use a page size other than 4KB (i.e, 16KB or 64KB), please be aware
-         that you will only be able to execute AArch32 binaries that were compiled
-         with page size aligned segments.
-         If you want to execute 32-bit userspace applications, say Y.
  config SYSVIPC_COMPAT
        def_bool y
        depends on COMPAT && SYSVIPC
index c7e1a7837706c17eeffd96edd17bcc4da0009af2,bdb3c05070a2b5ebcdbb713e0c628d8710f7f3a8..a56efb5626fa25264d3de7d34c06ba024cc2c131
  
  #include <asm/errno.h>
  
+ #define FUTEX_MAX_LOOPS       128 /* What's the largest number you can think of? */
  #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)               \
  do {                                                                  \
+       unsigned int loops = FUTEX_MAX_LOOPS;                           \
+                                                                       \
        uaccess_enable();                                               \
        asm volatile(                                                   \
  "     prfm    pstl1strm, %2\n"                                        \
  "1:   ldxr    %w1, %2\n"                                              \
        insn "\n"                                                       \
  "2:   stlxr   %w0, %w3, %2\n"                                         \
- "     cbnz    %w0, 1b\n"                                              \
- "     dmb     ish\n"                                                  \
+ "     cbz     %w0, 3f\n"                                              \
+ "     sub     %w4, %w4, %w0\n"                                        \
+ "     cbnz    %w4, 1b\n"                                              \
+ "     mov     %w0, %w7\n"                                             \
  "3:\n"                                                                        \
+ "     dmb     ish\n"                                                  \
  "     .pushsection .fixup,\"ax\"\n"                                   \
  "     .align  2\n"                                                    \
- "4:   mov     %w0, %w5\n"                                             \
+ "4:   mov     %w0, %w6\n"                                             \
  "     b       3b\n"                                                   \
  "     .popsection\n"                                                  \
        _ASM_EXTABLE(1b, 4b)                                            \
        _ASM_EXTABLE(2b, 4b)                                            \
-       : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)       \
-       : "r" (oparg), "Ir" (-EFAULT)                                   \
+       : "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp),      \
+         "+r" (loops)                                                  \
+       : "r" (oparg), "Ir" (-EFAULT), "Ir" (-EAGAIN)                   \
        : "memory");                                                    \
        uaccess_disable();                                              \
  } while (0)
  static inline int
  arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
  {
 -      int oldval, ret, tmp;
 +      int oldval = 0, ret, tmp;
        u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
  
        pagefault_disable();
  
        switch (op) {
        case FUTEX_OP_SET:
-               __futex_atomic_op("mov  %w3, %w4",
+               __futex_atomic_op("mov  %w3, %w5",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_ADD:
-               __futex_atomic_op("add  %w3, %w1, %w4",
+               __futex_atomic_op("add  %w3, %w1, %w5",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_OR:
-               __futex_atomic_op("orr  %w3, %w1, %w4",
+               __futex_atomic_op("orr  %w3, %w1, %w5",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        case FUTEX_OP_ANDN:
-               __futex_atomic_op("and  %w3, %w1, %w4",
+               __futex_atomic_op("and  %w3, %w1, %w5",
                                  ret, oldval, uaddr, tmp, ~oparg);
                break;
        case FUTEX_OP_XOR:
-               __futex_atomic_op("eor  %w3, %w1, %w4",
+               __futex_atomic_op("eor  %w3, %w1, %w5",
                                  ret, oldval, uaddr, tmp, oparg);
                break;
        default:
@@@ -93,6 -101,7 +101,7 @@@ futex_atomic_cmpxchg_inatomic(u32 *uval
                              u32 oldval, u32 newval)
  {
        int ret = 0;
+       unsigned int loops = FUTEX_MAX_LOOPS;
        u32 val, tmp;
        u32 __user *uaddr;
  
        asm volatile("// futex_atomic_cmpxchg_inatomic\n"
  "     prfm    pstl1strm, %2\n"
  "1:   ldxr    %w1, %2\n"
- "     sub     %w3, %w1, %w4\n"
- "     cbnz    %w3, 3f\n"
- "2:   stlxr   %w3, %w5, %2\n"
- "     cbnz    %w3, 1b\n"
- "     dmb     ish\n"
+ "     sub     %w3, %w1, %w5\n"
+ "     cbnz    %w3, 4f\n"
+ "2:   stlxr   %w3, %w6, %2\n"
+ "     cbz     %w3, 3f\n"
+ "     sub     %w4, %w4, %w3\n"
+ "     cbnz    %w4, 1b\n"
+ "     mov     %w0, %w8\n"
  "3:\n"
+ "     dmb     ish\n"
+ "4:\n"
  "     .pushsection .fixup,\"ax\"\n"
- "4:   mov     %w0, %w6\n"
- "     b       3b\n"
+ "5:   mov     %w0, %w7\n"
+ "     b       4b\n"
  "     .popsection\n"
-       _ASM_EXTABLE(1b, 4b)
-       _ASM_EXTABLE(2b, 4b)
-       : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
-       : "r" (oldval), "r" (newval), "Ir" (-EFAULT)
+       _ASM_EXTABLE(1b, 5b)
+       _ASM_EXTABLE(2b, 5b)
+       : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops)
+       : "r" (oldval), "r" (newval), "Ir" (-EFAULT), "Ir" (-EAGAIN)
        : "memory");
        uaccess_disable();
  
-       *uval = val;
+       if (!ret)
+               *uval = val;
        return ret;
  }
  
index 37603b5616a588e514da402c49a85029d47c74dd,4e3becfed38776f5a105ce144080c73fb00a268d..a287189ca8b4ea4124c0139cd15c683f6afdcd87
@@@ -27,7 -27,6 +27,7 @@@ static inline void __tlb_remove_table(v
        free_page_and_swap_cache((struct page *)_table);
  }
  
 +#define tlb_flush tlb_flush
  static void tlb_flush(struct mmu_gather *tlb);
  
  #include <asm-generic/tlb.h>
@@@ -63,7 -62,10 +63,10 @@@ static inline void __pte_free_tlb(struc
  static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
                                  unsigned long addr)
  {
-       tlb_remove_table(tlb, virt_to_page(pmdp));
+       struct page *page = virt_to_page(pmdp);
+       pgtable_pmd_page_dtor(page);
+       tlb_remove_table(tlb, page);
  }
  #endif
  
index 29755989f616c187481803b27ca9dbfcf0a7847b,48432027969c8d1bd183af3cc2374ef18fe486f3..ade32046f3fea606172536f16912ed3fbc34b9e4
@@@ -102,16 -102,10 +102,16 @@@ static void dump_instr(const char *lvl
  void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
  {
        struct stackframe frame;
 -      int skip;
 +      int skip = 0;
  
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
  
 +      if (regs) {
 +              if (user_mode(regs))
 +                      return;
 +              skip = 1;
 +      }
 +
        if (!tsk)
                tsk = current;
  
        frame.graph = 0;
  #endif
  
 -      skip = !!regs;
        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
@@@ -181,13 -176,15 +181,13 @@@ static int __die(const char *str, int e
                return ret;
  
        print_modules();
 -      __show_regs(regs);
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
                 end_of_stack(tsk));
 +      show_regs(regs);
  
 -      if (!user_mode(regs)) {
 -              dump_backtrace(regs, tsk);
 +      if (!user_mode(regs))
                dump_instr(KERN_EMERG, regs);
 -      }
  
        return ret;
  }
@@@ -462,6 -459,9 +462,9 @@@ static void user_cache_maint_handler(un
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:     /* DC CVAC, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
+       case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:    /* DC CVADP */
+               __user_cache_maint("sys 3, c7, c13, 1", address, ret);
+               break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:     /* DC CVAP */
                __user_cache_maint("sys 3, c7, c12, 1", address, ret);
                break;
@@@ -496,7 -496,7 +499,7 @@@ static void cntvct_read_handler(unsigne
  {
        int rt = ESR_ELx_SYS64_ISS_RT(esr);
  
-       pt_regs_write_reg(regs, rt, arch_counter_get_cntvct());
+       pt_regs_write_reg(regs, rt, arch_timer_read_counter());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
  }
  
@@@ -668,7 -668,7 +671,7 @@@ static void compat_cntvct_read_handler(
  {
        int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
        int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
-       u64 val = arch_counter_get_cntvct();
+       u64 val = arch_timer_read_counter();
  
        pt_regs_write_reg(regs, rt, lower_32_bits(val));
        pt_regs_write_reg(regs, rt2, upper_32_bits(val));
@@@ -950,9 -950,6 +953,6 @@@ int is_valid_bugaddr(unsigned long addr
  
  static int bug_handler(struct pt_regs *regs, unsigned int esr)
  {
-       if (user_mode(regs))
-               return DBG_HOOK_ERROR;
        switch (report_bug(regs->pc, regs)) {
        case BUG_TRAP_TYPE_BUG:
                die("Oops - BUG", regs, 0);
  }
  
  static struct break_hook bug_break_hook = {
-       .esr_val = 0xf2000000 | BUG_BRK_IMM,
-       .esr_mask = 0xffffffff,
        .fn = bug_handler,
+       .imm = BUG_BRK_IMM,
  };
  
  #ifdef CONFIG_KASAN_SW_TAGS
@@@ -992,9 -988,6 +991,6 @@@ static int kasan_handler(struct pt_reg
        u64 addr = regs->regs[0];
        u64 pc = regs->pc;
  
-       if (user_mode(regs))
-               return DBG_HOOK_ERROR;
        kasan_report(addr, size, write, pc);
  
        /*
        return DBG_HOOK_HANDLED;
  }
  
- #define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM)
- #define KASAN_ESR_MASK 0xffffff00
  static struct break_hook kasan_break_hook = {
-       .esr_val = KASAN_ESR_VAL,
-       .esr_mask = KASAN_ESR_MASK,
-       .fn = kasan_handler,
+       .fn     = kasan_handler,
+       .imm    = KASAN_BRK_IMM,
+       .mask   = KASAN_BRK_MASK,
  };
  #endif
  
@@@ -1037,7 -1027,9 +1030,9 @@@ int __init early_brk64(unsigned long ad
                struct pt_regs *regs)
  {
  #ifdef CONFIG_KASAN_SW_TAGS
-       if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL)
+       unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;
+       if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
                return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
  #endif
        return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
  /* This registration must happen early, before debug_traps_init(). */
  void __init trap_init(void)
  {
-       register_break_hook(&bug_break_hook);
+       register_kernel_break_hook(&bug_break_hook);
  #ifdef CONFIG_KASAN_SW_TAGS
-       register_break_hook(&kasan_break_hook);
+       register_kernel_break_hook(&kasan_break_hook);
  #endif
  }
diff --combined arch/arm64/mm/init.c
index 7cae155e81a5fb71aa8148865e44d9482bfb5b9a,03a8a6888ec04fdf828c07556d0cc9213b0f1ce6..40e2d7e5efcb1e10c34b4abf83352ca4ec4c0d99
@@@ -363,7 -363,7 +363,7 @@@ void __init arm64_memblock_init(void
                 * Otherwise, this is a no-op
                 */
                u64 base = phys_initrd_start & PAGE_MASK;
 -              u64 size = PAGE_ALIGN(phys_initrd_size);
 +              u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;
  
                /*
                 * We can only add back the initrd memory if we don't end up
                         base + size > memblock_start_of_DRAM() +
                                       linear_region_size,
                        "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
-                       initrd_start = 0;
+                       phys_initrd_size = 0;
                } else {
                        memblock_remove(base, size); /* clear MEMBLOCK_ flags */
                        memblock_add(base, size);
@@@ -440,6 -440,7 +440,7 @@@ void __init bootmem_init(void
        early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);
  
        max_pfn = max_low_pfn = max;
+       min_low_pfn = min;
  
        arm64_numa_init();
        /*
@@@ -535,7 -536,7 +536,7 @@@ void __init mem_init(void
        else
                swiotlb_force = SWIOTLB_NO_FORCE;
  
-       set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
+       set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);
  
  #ifndef CONFIG_SPARSEMEM_VMEMMAP
        free_unused_memmap();
index ea373cfbcecb5d8241f6a176a4a32a86a630c083,9a2f23df7610f7e172dd1d6f294249ad0e55e8f8..b2a951a798e2e9b9cc60b3c1f81ba6e966735bbf
@@@ -9,7 -9,7 +9,7 @@@
   * published by the Free Software Foundation.
   */
  
 -#define pr_fmt(fmt)   "arm_arch_timer: " fmt
 +#define pr_fmt(fmt)   "arch_timer: " fmt
  
  #include <linux/init.h>
  #include <linux/kernel.h>
@@@ -33,6 -33,9 +33,6 @@@
  
  #include <clocksource/arm_arch_timer.h>
  
 -#undef pr_fmt
 -#define pr_fmt(fmt) "arch_timer: " fmt
 -
  #define CNTTIDR               0x08
  #define CNTTIDR_VIRT(n)       (BIT(1) << ((n) * 4))
  
@@@ -149,6 -152,26 +149,26 @@@ u32 arch_timer_reg_read(int access, enu
        return val;
  }
  
+ static u64 arch_counter_get_cntpct_stable(void)
+ {
+       return __arch_counter_get_cntpct_stable();
+ }
+ static u64 arch_counter_get_cntpct(void)
+ {
+       return __arch_counter_get_cntpct();
+ }
+ static u64 arch_counter_get_cntvct_stable(void)
+ {
+       return __arch_counter_get_cntvct_stable();
+ }
+ static u64 arch_counter_get_cntvct(void)
+ {
+       return __arch_counter_get_cntvct();
+ }
  /*
   * Default to cp15 based access because arm64 uses this function for
   * sched_clock() before DT is probed and the cp15 method is guaranteed
@@@ -316,13 -339,6 +336,6 @@@ static u64 notrace arm64_858921_read_cn
  }
  #endif
  
- #ifdef CONFIG_ARM64_ERRATUM_1188873
- static u64 notrace arm64_1188873_read_cntvct_el0(void)
- {
-       return read_sysreg(cntvct_el0);
- }
- #endif
  #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
  /*
   * The low bits of the counter registers are indeterminate while bit 10 or
@@@ -369,8 -385,7 +382,7 @@@ static u32 notrace sun50i_a64_read_cntv
  DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround);
  EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
  
- DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled);
- EXPORT_SYMBOL_GPL(arch_timer_read_ool_enabled);
+ static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
  
  static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
                                                struct clock_event_device *clk)
@@@ -454,14 -469,6 +466,6 @@@ static const struct arch_timer_erratum_
                .read_cntvct_el0 = arm64_858921_read_cntvct_el0,
        },
  #endif
- #ifdef CONFIG_ARM64_ERRATUM_1188873
-       {
-               .match_type = ate_match_local_cap_id,
-               .id = (void *)ARM64_WORKAROUND_1188873,
-               .desc = "ARM erratum 1188873",
-               .read_cntvct_el0 = arm64_1188873_read_cntvct_el0,
-       },
- #endif
  #ifdef CONFIG_SUN50I_ERRATUM_UNKNOWN1
        {
                .match_type = ate_match_dt,
@@@ -549,11 -556,8 +553,8 @@@ void arch_timer_enable_workaround(cons
                        per_cpu(timer_unstable_counter_workaround, i) = wa;
        }
  
-       /*
-        * Use the locked version, as we're called from the CPU
-        * hotplug framework. Otherwise, we end-up in deadlock-land.
-        */
-       static_branch_enable_cpuslocked(&arch_timer_read_ool_enabled);
+       if (wa->read_cntvct_el0 || wa->read_cntpct_el0)
+               atomic_set(&timer_unstable_counter_workaround_in_use, 1);
  
        /*
         * Don't use the vdso fastpath if errata require using the
  static void arch_timer_check_ool_workaround(enum arch_timer_erratum_match_type type,
                                            void *arg)
  {
-       const struct arch_timer_erratum_workaround *wa;
+       const struct arch_timer_erratum_workaround *wa, *__wa;
        ate_match_fn_t match_fn = NULL;
        bool local = false;
  
        if (!wa)
                return;
  
-       if (needs_unstable_timer_counter_workaround()) {
-               const struct arch_timer_erratum_workaround *__wa;
-               __wa = __this_cpu_read(timer_unstable_counter_workaround);
-               if (__wa && wa != __wa)
-                       pr_warn("Can't enable workaround for %s (clashes with %s\n)",
-                               wa->desc, __wa->desc);
+       __wa = __this_cpu_read(timer_unstable_counter_workaround);
+       if (__wa && wa != __wa)
+               pr_warn("Can't enable workaround for %s (clashes with %s\n)",
+                       wa->desc, __wa->desc);
  
-               if (__wa)
-                       return;
-       }
+       if (__wa)
+               return;
  
        arch_timer_enable_workaround(wa, local);
        pr_info("Enabling %s workaround for %s\n",
                local ? "local" : "global", wa->desc);
  }
  
- #define erratum_handler(fn, r, ...)                                   \
- ({                                                                    \
-       bool __val;                                                     \
-       if (needs_unstable_timer_counter_workaround()) {                \
-               const struct arch_timer_erratum_workaround *__wa;       \
-               __wa = __this_cpu_read(timer_unstable_counter_workaround); \
-               if (__wa && __wa->fn) {                                 \
-                       r = __wa->fn(__VA_ARGS__);                      \
-                       __val = true;                                   \
-               } else {                                                \
-                       __val = false;                                  \
-               }                                                       \
-       } else {                                                        \
-               __val = false;                                          \
-       }                                                               \
-       __val;                                                          \
- })
  static bool arch_timer_this_cpu_has_cntvct_wa(void)
  {
-       const struct arch_timer_erratum_workaround *wa;
+       return has_erratum_handler(read_cntvct_el0);
+ }
  
-       wa = __this_cpu_read(timer_unstable_counter_workaround);
-       return wa && wa->read_cntvct_el0;
+ static bool arch_timer_counter_has_wa(void)
+ {
+       return atomic_read(&timer_unstable_counter_workaround_in_use);
  }
  #else
  #define arch_timer_check_ool_workaround(t,a)          do { } while(0)
- #define erratum_set_next_event_tval_virt(...)         ({BUG(); 0;})
- #define erratum_set_next_event_tval_phys(...)         ({BUG(); 0;})
- #define erratum_handler(fn, r, ...)                   ({false;})
  #define arch_timer_this_cpu_has_cntvct_wa()           ({false;})
+ #define arch_timer_counter_has_wa()                   ({false;})
  #endif /* CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND */
  
  static __always_inline irqreturn_t timer_handler(const int access,
@@@ -733,11 -716,6 +713,6 @@@ static __always_inline void set_next_ev
  static int arch_timer_set_next_event_virt(unsigned long evt,
                                          struct clock_event_device *clk)
  {
-       int ret;
-       if (erratum_handler(set_next_event_virt, ret, evt, clk))
-               return ret;
        set_next_event(ARCH_TIMER_VIRT_ACCESS, evt, clk);
        return 0;
  }
  static int arch_timer_set_next_event_phys(unsigned long evt,
                                          struct clock_event_device *clk)
  {
-       int ret;
-       if (erratum_handler(set_next_event_phys, ret, evt, clk))
-               return ret;
        set_next_event(ARCH_TIMER_PHYS_ACCESS, evt, clk);
        return 0;
  }
@@@ -774,6 -747,10 +744,10 @@@ static void __arch_timer_setup(unsigne
        clk->features = CLOCK_EVT_FEAT_ONESHOT;
  
        if (type == ARCH_TIMER_TYPE_CP15) {
+               typeof(clk->set_next_event) sne;
+               arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
                if (arch_timer_c3stop)
                        clk->features |= CLOCK_EVT_FEAT_C3STOP;
                clk->name = "arch_sys_timer";
                case ARCH_TIMER_VIRT_PPI:
                        clk->set_state_shutdown = arch_timer_shutdown_virt;
                        clk->set_state_oneshot_stopped = arch_timer_shutdown_virt;
-                       clk->set_next_event = arch_timer_set_next_event_virt;
+                       sne = erratum_handler(set_next_event_virt);
                        break;
                case ARCH_TIMER_PHYS_SECURE_PPI:
                case ARCH_TIMER_PHYS_NONSECURE_PPI:
                case ARCH_TIMER_HYP_PPI:
                        clk->set_state_shutdown = arch_timer_shutdown_phys;
                        clk->set_state_oneshot_stopped = arch_timer_shutdown_phys;
-                       clk->set_next_event = arch_timer_set_next_event_phys;
+                       sne = erratum_handler(set_next_event_phys);
                        break;
                default:
                        BUG();
                }
  
-               arch_timer_check_ool_workaround(ate_match_local_cap_id, NULL);
+               clk->set_next_event = sne;
        } else {
                clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
                clk->name = "arch_mem_timer";
@@@ -830,7 -807,11 +804,11 @@@ static void arch_timer_evtstrm_enable(i
        cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
                        | ARCH_TIMER_VIRT_EVT_EN;
        arch_timer_set_cntkctl(cntkctl);
+ #ifdef CONFIG_ARM64
+       cpu_set_named_feature(EVTSTRM);
+ #else
        elf_hwcap |= HWCAP_EVTSTRM;
+ #endif
  #ifdef CONFIG_COMPAT
        compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
  #endif
@@@ -995,12 -976,22 +973,22 @@@ static void __init arch_counter_registe
  
        /* Register the CP15 based counter if we have one */
        if (type & ARCH_TIMER_TYPE_CP15) {
+               u64 (*rd)(void);
                if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) ||
-                   arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI)
-                       arch_timer_read_counter = arch_counter_get_cntvct;
-               else
-                       arch_timer_read_counter = arch_counter_get_cntpct;
+                   arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) {
+                       if (arch_timer_counter_has_wa())
+                               rd = arch_counter_get_cntvct_stable;
+                       else
+                               rd = arch_counter_get_cntvct;
+               } else {
+                       if (arch_timer_counter_has_wa())
+                               rd = arch_counter_get_cntpct_stable;
+                       else
+                               rd = arch_counter_get_cntpct;
+               }
  
+               arch_timer_read_counter = rd;
                clocksource_counter.archdata.vdso_direct = vdso_default;
        } else {
                arch_timer_read_counter = arch_counter_get_cntvct_mem;
@@@ -1052,7 -1043,11 +1040,11 @@@ static int arch_timer_cpu_pm_notify(str
        } else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT) {
                arch_timer_set_cntkctl(__this_cpu_read(saved_cntkctl));
  
+ #ifdef CONFIG_ARM64
+               if (cpu_have_named_feature(EVTSTRM))
+ #else
                if (elf_hwcap & HWCAP_EVTSTRM)
+ #endif
                        cpumask_set_cpu(smp_processor_id(), &evtstrm_available);
        }
        return NOTIFY_OK;
index b1f7b64652dbb3da8d416432901ba576fc689558,645269c3906d84d3d36279803bfec5717e59a8a6..0460c7581220e13cc8418488042d9a7bb17c0b3e
@@@ -16,9 -16,9 +16,9 @@@ cflags-$(CONFIG_X86)          += -m$(BITS) -D__
  
  # arm64 uses the full KBUILD_CFLAGS so it's necessary to explicitly
  # disable the stackleak plugin
- cflags-$(CONFIG_ARM64)                := $(subst -pg,,$(KBUILD_CFLAGS)) -fpie \
-                                  $(DISABLE_STACKLEAK_PLUGIN)
- cflags-$(CONFIG_ARM)          := $(subst -pg,,$(KBUILD_CFLAGS)) \
+ cflags-$(CONFIG_ARM64)                := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
+                                  -fpie $(DISABLE_STACKLEAK_PLUGIN)
+ cflags-$(CONFIG_ARM)          := $(subst $(CC_FLAGS_FTRACE),,$(KBUILD_CFLAGS)) \
                                   -fno-builtin -fpic \
                                   $(call cc-option,-mno-single-pic-base)
  
@@@ -71,6 -71,7 +71,6 @@@ CFLAGS_arm64-stub.o           := -DTEXT_OFFSET=$
  extra-$(CONFIG_EFI_ARMSTUB)   := $(lib-y)
  lib-$(CONFIG_EFI_ARMSTUB)     := $(patsubst %.o,%.stub.o,$(lib-y))
  
 -STUBCOPY_RM-y                 := -R *ksymtab* -R *kcrctab*
  STUBCOPY_FLAGS-$(CONFIG_ARM64)        += --prefix-alloc-sections=.init \
                                   --prefix-symbols=__efistub_
  STUBCOPY_RELOC-$(CONFIG_ARM64)        := R_AARCH64_ABS
@@@ -85,13 -86,12 +85,13 @@@ $(obj)/%.stub.o: $(obj)/%.o FORC
  # this time, use objcopy and leave all sections in place.
  #
  quiet_cmd_stubcopy = STUBCPY $@
 -      cmd_stubcopy = if $(STRIP) --strip-debug $(STUBCOPY_RM-y) -o $@ $<; \
 -                   then if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); \
 -                   then (echo >&2 "$@: absolute symbol references not allowed in the EFI stub"; \
 -                         rm -f $@; /bin/false);                         \
 -                   else $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@; fi        \
 -                   else /bin/false; fi
 +      cmd_stubcopy =                                                  \
 +      $(STRIP) --strip-debug -o $@ $<;                                \
 +      if $(OBJDUMP) -r $@ | grep $(STUBCOPY_RELOC-y); then            \
 +              echo "$@: absolute symbol references not allowed in the EFI stub" >&2; \
 +              /bin/false;                                             \
 +      fi;                                                             \
 +      $(OBJCOPY) $(STUBCOPY_FLAGS-y) $< $@
  
  #
  # ARM discards the .data section because it disallows r/w data in the
diff --combined mm/kasan/Makefile
index 613dfe681e9fcd0b18907fc0dc3ddb55a0039eda,f06ee820d35602a54270a3a12677a2c15be1f16b..08b43de2383b7b844b55d13e5814594c5cf7ecec
@@@ -2,21 -2,18 +2,21 @@@
  KASAN_SANITIZE := n
  UBSAN_SANITIZE_common.o := n
  UBSAN_SANITIZE_generic.o := n
 +UBSAN_SANITIZE_generic_report.o := n
  UBSAN_SANITIZE_tags.o := n
  KCOV_INSTRUMENT := n
  
- CFLAGS_REMOVE_common.o = -pg
- CFLAGS_REMOVE_generic.o = -pg
- CFLAGS_REMOVE_generic_report.o = -pg
- CFLAGS_REMOVE_tags.o = -pg
+ CFLAGS_REMOVE_common.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_generic.o = $(CC_FLAGS_FTRACE)
++CFLAGS_REMOVE_generic_report.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE)
  
  # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
  # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
  
  CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
  CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
 +CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
  CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
  
  obj-$(CONFIG_KASAN) := common.o init.o report.o
diff --combined virt/kvm/arm/mmu.c
index a39dcfdbcc6527afc0ebc911cf0262238a2c131d,ad90ea3e5558a0911bf7602d67c2a16e6a7305f8..74b6582eaa3cffe8c19dcd5935811cb457a8dc10
@@@ -189,7 -189,7 +189,7 @@@ static void clear_stage2_pmd_entry(stru
        VM_BUG_ON(pmd_thp_or_huge(*pmd));
        pmd_clear(pmd);
        kvm_tlb_flush_vmid_ipa(kvm, addr);
-       pte_free_kernel(NULL, pte_table);
+       free_page((unsigned long)pte_table);
        put_page(virt_to_page(pmd));
  }
  
@@@ -1781,12 -1781,8 +1781,12 @@@ static int user_mem_abort(struct kvm_vc
                 * Only PMD_SIZE transparent hugepages(THP) are
                 * currently supported. This code will need to be
                 * updated to support other THP sizes.
 +               *
 +               * Make sure the host VA and the guest IPA are sufficiently
 +               * aligned and that the block is contained within the memslot.
                 */
 -              if (transparent_hugepage_adjust(&pfn, &fault_ipa))
 +              if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
 +                  transparent_hugepage_adjust(&pfn, &fault_ipa))
                        vma_pagesize = PMD_SIZE;
        }
  
This page took 0.147834 seconds and 4 git commands to generate.