Git Repo - linux.git/commitdiff
Merge branch 'linus' into locking/core, to resolve conflicts
author Ingo Molnar <[email protected]>
Tue, 7 Nov 2017 09:32:44 +0000 (10:32 +0100)
committer Ingo Molnar <[email protected]>
Tue, 7 Nov 2017 09:32:44 +0000 (10:32 +0100)
Conflicts:
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler-intel.h
include/uapi/linux/stddef.h

Signed-off-by: Ingo Molnar <[email protected]>
96 files changed:
arch/alpha/include/asm/atomic.h
arch/alpha/include/asm/rwsem.h
arch/alpha/include/asm/spinlock.h
arch/arc/kernel/smp.c
arch/arm/include/asm/spinlock.h
arch/ia64/include/asm/rwsem.h
arch/ia64/include/asm/spinlock.h
arch/m32r/include/asm/spinlock.h
arch/metag/include/asm/spinlock.h
arch/metag/include/asm/spinlock_lnkget.h
arch/metag/include/asm/spinlock_lock1.h
arch/mips/kernel/pm-cps.c
arch/parisc/include/asm/atomic.h
arch/parisc/include/asm/spinlock.h
arch/s390/include/asm/rwsem.h
arch/s390/include/asm/spinlock.h
arch/s390/lib/spinlock.c
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/ptrace.h
arch/sparc/include/asm/spinlock_32.h
arch/sparc/include/asm/spinlock_64.h
arch/um/include/shared/init.h
arch/x86/Kconfig
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/qspinlock.h
arch/x86/include/asm/rwsem.h
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/vgtod.h
arch/x86/kernel/ldt.c
arch/x86/xen/spinlock.c
drivers/md/dm-stats.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/tap.c
drivers/net/tun.c
drivers/usb/host/uhci-hcd.h
fs/crypto/keyinfo.c
fs/fcntl.c
fs/fs_pin.c
fs/namei.c
fs/ncpfs/dir.c
fs/overlayfs/readdir.c
fs/proc/array.c
fs/proc_namespace.c
fs/readdir.c
include/asm-generic/atomic-long.h
include/asm-generic/qrwlock_types.h
include/asm-generic/rwsem.h
include/linux/atomic.h
include/linux/average.h
include/linux/bitops.h
include/linux/compiler-clang.h
include/linux/compiler-gcc.h
include/linux/compiler-intel.h
include/linux/compiler.h
include/linux/completion.h
include/linux/dcache.h
include/linux/dynamic_queue_limits.h
include/linux/genetlink.h
include/linux/genhd.h
include/linux/huge_mm.h
include/linux/jump_label.h
include/linux/jump_label_ratelimit.h
include/linux/linkage.h
include/linux/lockdep.h
include/linux/netfilter/nfnetlink.h
include/linux/rculist.h
include/linux/rtnetlink.h
include/linux/rwsem.h
include/linux/spinlock.h
include/linux/workqueue.h
include/net/ip_vs.h
include/net/netfilter/nf_tables.h
include/uapi/linux/stddef.h
kernel/acct.c
kernel/events/core.c
kernel/locking/rwsem.c
kernel/locking/spinlock.c
kernel/seccomp.c
kernel/task_work.c
kernel/trace/trace.h
kernel/trace/trace_stack.c
kernel/workqueue.c
lib/assoc_array.c
lib/dynamic_queue_limits.c
mm/huge_memory.c
mm/slab.h
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/netfilter/ipvs/ip_vs_sync.c
scripts/headers_install.sh
sound/firewire/amdtp-stream.h
tools/arch/x86/include/asm/atomic.h
tools/include/asm-generic/atomic-gcc.h
tools/perf/util/session.h
tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h

index 16961a3f45ba3c83752e49f8cef8987428ec2c6b,85867d3cea649a093d90ff8c053475084ed3b9e0..767bfdd42992de7fb4084dfecb4caa6c13655129
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ALPHA_ATOMIC_H
  #define _ALPHA_ATOMIC_H
  
   * than regular operations.
   */
  
 +/*
 + * To ensure dependency ordering is preserved for the _relaxed and
 + * _release atomics, an smp_read_barrier_depends() is unconditionally
 + * inserted into the _relaxed variants, which are used to build the
 + * barriered versions. To avoid redundant back-to-back fences, we can
 + * define the _acquire and _fence versions explicitly.
 + */
 +#define __atomic_op_acquire(op, args...)      op##_relaxed(args)
 +#define __atomic_op_fence                     __atomic_op_release
  
  #define ATOMIC_INIT(i)                { (i) }
  #define ATOMIC64_INIT(i)      { (i) }
@@@ -69,7 -61,6 +70,7 @@@ static inline int atomic_##op##_return_
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
 +      smp_read_barrier_depends();                                     \
        return result;                                                  \
  }
  
@@@ -87,7 -78,6 +88,7 @@@ static inline int atomic_fetch_##op##_r
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
 +      smp_read_barrier_depends();                                     \
        return result;                                                  \
  }
  
@@@ -122,7 -112,6 +123,7 @@@ static __inline__ long atomic64_##op##_
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
 +      smp_read_barrier_depends();                                     \
        return result;                                                  \
  }
  
@@@ -140,7 -129,6 +141,7 @@@ static __inline__ long atomic64_fetch_#
        ".previous"                                                     \
        :"=&r" (temp), "=m" (v->counter), "=&r" (result)                \
        :"Ir" (i), "m" (v->counter) : "memory");                        \
 +      smp_read_barrier_depends();                                     \
        return result;                                                  \
  }
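The comment block earlier in this file states the scheme: every Alpha _relaxed atomic now ends in smp_read_barrier_depends(), so _acquire can simply alias _relaxed and _fence can reuse _release. For contrast, the generic fallback builds acquire by appending a full barrier to the relaxed op — roughly this shape (a simplified sketch of the include/linux/atomic.h side, not Alpha code):

    #define __atomic_op_acquire(op, args...)                              \
    ({                                                                    \
            typeof(op##_relaxed(args)) __ret = op##_relaxed(args);        \
            smp_mb__after_atomic();                                       \
            __ret;                                                        \
    })

On Alpha that trailing barrier would be redundant with the one already baked into the _relaxed variants above, which is exactly why the explicit #defines bypass it.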
  
index 9624cb4cbf2fa388cf09ca2f76ba9e087560da67,3925f06afd6ba17d3a7dd4c7eab1751d24309a99..cf8fc8f9a2ed566067a3a67724f3a4a4389fab54
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ALPHA_RWSEM_H
  #define _ALPHA_RWSEM_H
  
@@@ -21,7 -22,7 +22,7 @@@
  #define RWSEM_ACTIVE_READ_BIAS                RWSEM_ACTIVE_BIAS
  #define RWSEM_ACTIVE_WRITE_BIAS               (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
  
 -static inline void __down_read(struct rw_semaphore *sem)
 +static inline int ___down_read(struct rw_semaphore *sem)
  {
        long oldcount;
  #ifndef       CONFIG_SMP
        :"=&r" (oldcount), "=m" (sem->count), "=&r" (temp)
        :"Ir" (RWSEM_ACTIVE_READ_BIAS), "m" (sem->count) : "memory");
  #endif
 -      if (unlikely(oldcount < 0))
 +      return (oldcount < 0);
 +}
 +
 +static inline void __down_read(struct rw_semaphore *sem)
 +{
 +      if (unlikely(___down_read(sem)))
                rwsem_down_read_failed(sem);
  }
  
 +static inline int __down_read_killable(struct rw_semaphore *sem)
 +{
 +      if (unlikely(___down_read(sem)))
 +              if (IS_ERR(rwsem_down_read_failed_killable(sem)))
 +                      return -EINTR;
 +
 +      return 0;
 +}
 +
  /*
   * trylock for reading -- returns 1 if successful, 0 if contention
   */
@@@ -108,10 -95,9 +109,10 @@@ static inline void __down_write(struct 
  
  static inline int __down_write_killable(struct rw_semaphore *sem)
  {
 -      if (unlikely(___down_write(sem)))
 +      if (unlikely(___down_write(sem))) {
                if (IS_ERR(rwsem_down_write_failed_killable(sem)))
                        return -EINTR;
 +      }
  
        return 0;
  }
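This is the standard shape for growing a killable variant: hoist the fast path into ___down_read() and let each wrapper choose its own slow path. A hypothetical caller (the semaphore name is illustrative):

    /* Sketch: bail out with -EINTR if a fatal signal interrupts the wait. */
    if (down_read_killable(&my_rwsem))
            return -EINTR;
    /* ... read-side critical section ... */
    up_read(&my_rwsem);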
index 3e2b4a05cb0f8da4acf605e7d850d7d181e73b17,aa4304afbea6468dc47492ec979d1361b01aefda..1221cbb86a6f6f5dd675ee1bd5c283ff09af4751
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ALPHA_SPINLOCK_H
  #define _ALPHA_SPINLOCK_H
  
@@@ -13,6 -14,7 +14,6 @@@
   * We make no fairness assumptions. They have a cost.
   */
  
 -#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
  #define arch_spin_is_locked(x)        ((x)->lock != 0)
  
  static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
@@@ -53,6 -55,16 +54,6 @@@ static inline int arch_spin_trylock(arc
  
  /***********************************************************/
  
 -static inline int arch_read_can_lock(arch_rwlock_t *lock)
 -{
 -      return (lock->lock & 1) == 0;
 -}
 -
 -static inline int arch_write_can_lock(arch_rwlock_t *lock)
 -{
 -      return lock->lock == 0;
 -}
 -
  static inline void arch_read_lock(arch_rwlock_t *lock)
  {
        long regx;
@@@ -159,4 -171,7 +160,4 @@@ static inline void arch_write_unlock(ar
        lock->lock = 0;
  }
  
 -#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 -#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 -
  #endif /* _ALPHA_SPINLOCK_H */
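The arch_*_can_lock() and *_lock_flags() boilerplate deleted here is not lost; this series centralizes the defaults, with the generic headers supplying a fallback whenever an architecture stays silent. A minimal sketch of that pattern (assuming the include/linux/spinlock.h side of the series):

    /* Generic fallback -- compiled in only if the arch defined nothing. */
    #ifndef arch_spin_lock_flags
    #define arch_spin_lock_flags(lock, flags)  arch_spin_lock(lock)
    #endif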
diff --combined arch/arc/kernel/smp.c
index 94cabe73664bb0941cc379b6f643d3060a833c6a,6df9d94a953763eca43b20f02f1897308ab1ee7a..efe8b4200a676529a9f3f0af52d50faca176a1e3
@@@ -23,6 -23,8 +23,8 @@@
  #include <linux/cpumask.h>
  #include <linux/reboot.h>
  #include <linux/irqdomain.h>
+ #include <linux/export.h>
  #include <asm/processor.h>
  #include <asm/setup.h>
  #include <asm/mach_desc.h>
@@@ -30,6 -32,9 +32,9 @@@
  #ifndef CONFIG_ARC_HAS_LLSC
  arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
  arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+ EXPORT_SYMBOL_GPL(smp_atomic_ops_lock);
+ EXPORT_SYMBOL_GPL(smp_bitops_lock);
  #endif
  
  struct plat_smp_ops  __weak plat_smp_ops;
@@@ -245,7 -250,7 +250,7 @@@ static void ipi_send_msg_one(int cpu, e
         * and read back old value
         */
        do {
 -              new = old = ACCESS_ONCE(*ipi_data_ptr);
 +              new = old = READ_ONCE(*ipi_data_ptr);
                new |= 1U << msg;
        } while (cmpxchg(ipi_data_ptr, old, new) != old);
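The ACCESS_ONCE() → READ_ONCE()/WRITE_ONCE() conversions recurring through this merge are mechanical but not cosmetic. ACCESS_ONCE() was a bare volatile cast, well-defined only for scalar types; the replacements give the same single, non-torn access for scalars and also handle other sizes. Roughly (simplified from include/linux/compiler.h; variable names illustrative):

    /* Old: a volatile cast -- fine for an int, unusable for aggregates. */
    #define ACCESS_ONCE(x)  (*(volatile typeof(x) *)&(x))

    /* New: the compiler may not tear, fuse, or invent the access. */
    val = READ_ONCE(shared);          /* single, non-torn load  */
    WRITE_ONCE(shared, val | flag);   /* single, non-torn store */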
  
index 77f50ae0aeb47826752e630f6e011ba8a72d8ce3,25cb465c8538b22cc68de7cdd39e2852a8382f16..099c78fcf62d43cd0a123b4d520d44a5d853a813
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __ASM_SPINLOCK_H
  #define __ASM_SPINLOCK_H
  
@@@ -52,6 -53,8 +53,6 @@@ static inline void dsb_sev(void
   * memory.
   */
  
 -#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 -
  static inline void arch_spin_lock(arch_spinlock_t *lock)
  {
        unsigned long tmp;
@@@ -71,7 -74,7 +72,7 @@@
  
        while (lockval.tickets.next != lockval.tickets.owner) {
                wfe();
 -              lockval.tickets.owner = ACCESS_ONCE(lock->tickets.owner);
 +              lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
        }
  
        smp_mb();
@@@ -191,6 -194,9 +192,6 @@@ static inline void arch_write_unlock(ar
        dsb_sev();
  }
  
 -/* write_can_lock - would write_trylock() succeed? */
 -#define arch_write_can_lock(x)                (ACCESS_ONCE((x)->lock) == 0)
 -
  /*
   * Read locks are a bit more hairy:
   *  - Exclusively load the lock value.
@@@ -268,4 -274,14 +269,4 @@@ static inline int arch_read_trylock(arc
        }
  }
  
 -/* read_can_lock - would read_trylock() succeed? */
 -#define arch_read_can_lock(x)         (ACCESS_ONCE((x)->lock) < 0x80000000)
 -
 -#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 -#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 -
 -#define arch_spin_relax(lock) cpu_relax()
 -#define arch_read_relax(lock) cpu_relax()
 -#define arch_write_relax(lock)        cpu_relax()
 -
  #endif /* __ASM_SPINLOCK_H */
index 49f7db0a1bcdf438c75fd488ec5ba2e3f6a8a990,7d6fceb3d5671c12e5e69330cfa1f79382e31673..917910607e0ea94a5bb349540d5d6cdbaec79de8
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * R/W semaphores for ia64
   *
  /*
   * lock for reading
   */
 -static inline void
 -__down_read (struct rw_semaphore *sem)
 +static inline int
 +___down_read (struct rw_semaphore *sem)
  {
        long result = ia64_fetchadd8_acq((unsigned long *)&sem->count.counter, 1);
  
 -      if (result < 0)
 +      return (result < 0);
 +}
 +
 +static inline void
 +__down_read (struct rw_semaphore *sem)
 +{
 +      if (___down_read(sem))
                rwsem_down_read_failed(sem);
  }
  
 +static inline int
 +__down_read_killable (struct rw_semaphore *sem)
 +{
 +      if (___down_read(sem))
 +              if (IS_ERR(rwsem_down_read_failed_killable(sem)))
 +                      return -EINTR;
 +
 +      return 0;
 +}
 +
  /*
   * lock for writing
   */
@@@ -88,10 -73,9 +89,10 @@@ __down_write (struct rw_semaphore *sem
  static inline int
  __down_write_killable (struct rw_semaphore *sem)
  {
 -      if (___down_write(sem))
 +      if (___down_write(sem)) {
                if (IS_ERR(rwsem_down_write_failed_killable(sem)))
                        return -EINTR;
 +      }
  
        return 0;
  }
index e98775be112d5dc237d016279ac1e1fd076d86c5,aa057abd948ec8eb01447e978e50f29616b0cd8a..afd0b3121b4c9565cd5c20d85e813e92fea50039
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_IA64_SPINLOCK_H
  #define _ASM_IA64_SPINLOCK_H
  
@@@ -61,7 -62,7 +62,7 @@@ static __always_inline void __ticket_sp
  
  static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
  {
 -      int tmp = ACCESS_ONCE(lock->lock);
 +      int tmp = READ_ONCE(lock->lock);
  
        if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
                return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
@@@ -73,19 -74,19 +74,19 @@@ static __always_inline void __ticket_sp
        unsigned short  *p = (unsigned short *)&lock->lock + 1, tmp;
  
        asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
 -      ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 +      WRITE_ONCE(*p, (tmp + 2) & ~1);
  }
  
  static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
  {
 -      long tmp = ACCESS_ONCE(lock->lock);
 +      long tmp = READ_ONCE(lock->lock);
  
        return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
  }
  
  static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
  {
 -      long tmp = ACCESS_ONCE(lock->lock);
 +      long tmp = READ_ONCE(lock->lock);
  
        return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
  }
@@@ -126,7 -127,9 +127,7 @@@ static __always_inline void arch_spin_l
  {
        arch_spin_lock(lock);
  }
 -
 -#define arch_read_can_lock(rw)                (*(volatile int *)(rw) >= 0)
 -#define arch_write_can_lock(rw)       (*(volatile int *)(rw) == 0)
 +#define arch_spin_lock_flags  arch_spin_lock_flags
  
  #ifdef ASM_SUPPORTED
  
@@@ -154,7 -157,6 +155,7 @@@ arch_read_lock_flags(arch_rwlock_t *loc
                : "p6", "p7", "r2", "memory");
  }
  
 +#define arch_read_lock_flags arch_read_lock_flags
  #define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
  
  #else /* !ASM_SUPPORTED */
@@@ -207,7 -209,6 +208,7 @@@ arch_write_lock_flags(arch_rwlock_t *lo
                : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
  }
  
 +#define arch_write_lock_flags arch_write_lock_flags
  #define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
  
  #define arch_write_trylock(rw)                                                        \
@@@ -231,6 -232,8 +232,6 @@@ static inline void arch_write_unlock(ar
  
  #else /* !ASM_SUPPORTED */
  
 -#define arch_write_lock_flags(l, flags) arch_write_lock(l)
 -
  #define arch_write_lock(l)                                                            \
  ({                                                                                    \
        __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);                       \
@@@ -270,4 -273,8 +271,4 @@@ static inline int arch_read_trylock(arc
        return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
  }
  
 -#define arch_spin_relax(lock) cpu_relax()
 -#define arch_read_relax(lock) cpu_relax()
 -#define arch_write_relax(lock)        cpu_relax()
 -
  #endif /*  _ASM_IA64_SPINLOCK_H */
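Note the idiom added throughout these headers: immediately after an arch-specific implementation comes "#define arch_read_lock_flags arch_read_lock_flags". The self-#define is how generic code detects an override -- it tests #ifndef and installs a default only when the arch said nothing. In sketch form (the generic side is assumed from this series):

    /* Arch header: real implementation, then mark it as present. */
    static inline void arch_read_lock_flags(arch_rwlock_t *l, unsigned long f)
    {
            /* ... arch-specific locking ... */
    }
    #define arch_read_lock_flags arch_read_lock_flags

    /* Generic header: default only for silent architectures. */
    #ifndef arch_read_lock_flags
    #define arch_read_lock_flags(l, f)  arch_read_lock(l)
    #endif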
index 882203db8723347d07b5ac99ddc7f050bb5f031f,604af84427ff3c3ea5e03469edfc1fb147dae723..0189f410f8f58901f1c53f7c5e54a978a9abefa2
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_M32R_SPINLOCK_H
  #define _ASM_M32R_SPINLOCK_H
  
@@@ -28,6 -29,7 +29,6 @@@
   */
  
  #define arch_spin_is_locked(x)                (*(volatile int *)(&(x)->slock) <= 0)
 -#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
  
  /**
   * arch_spin_trylock - Try spin lock and return a result
@@@ -136,6 -138,18 +137,6 @@@ static inline void arch_spin_unlock(arc
   * semaphore.h for details.  -ben
   */
  
 -/**
 - * read_can_lock - would read_trylock() succeed?
 - * @lock: the rwlock in question.
 - */
 -#define arch_read_can_lock(x) ((int)(x)->lock > 0)
 -
 -/**
 - * write_can_lock - would write_trylock() succeed?
 - * @lock: the rwlock in question.
 - */
 -#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
 -
  static inline void arch_read_lock(arch_rwlock_t *rw)
  {
        unsigned long tmp0, tmp1;
@@@ -304,4 -318,11 +305,4 @@@ static inline int arch_write_trylock(ar
        return 0;
  }
  
 -#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 -#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 -
 -#define arch_spin_relax(lock) cpu_relax()
 -#define arch_read_relax(lock) cpu_relax()
 -#define arch_write_relax(lock)        cpu_relax()
 -
  #endif        /* _ASM_M32R_SPINLOCK_H */
index 80e3e59172f25f052f18bbb39c67863837655ae8,349938c35f2dde38776610a0832bfdc8db0922aa..4497c232d9c1a311fb4d60bc403b05ed84557c00
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __ASM_SPINLOCK_H
  #define __ASM_SPINLOCK_H
  
   * locked.
   */
  
 -#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 -
 -#define       arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 -#define       arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 -
 -#define arch_spin_relax(lock) cpu_relax()
 -#define arch_read_relax(lock) cpu_relax()
 -#define arch_write_relax(lock)        cpu_relax()
 -
  #endif /* __ASM_SPINLOCK_H */
index 5708ac0a9d091ab9bcc952a4528c9884a684933d,029935560b7f9d44c14899aebea028f0604a0f58..dfd780eab350420b803d11eb2061b13f9686aaa8
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __ASM_SPINLOCK_LNKGET_H
  #define __ASM_SPINLOCK_LNKGET_H
  
@@@ -136,6 -137,21 +137,6 @@@ static inline void arch_write_unlock(ar
                      : "memory");
  }
  
 -/* write_can_lock - would write_trylock() succeed? */
 -static inline int arch_write_can_lock(arch_rwlock_t *rw)
 -{
 -      int ret;
 -
 -      asm volatile ("LNKGETD  %0, [%1]\n"
 -                    "CMP      %0, #0\n"
 -                    "MOV      %0, #1\n"
 -                    "XORNZ     %0, %0, %0\n"
 -                    : "=&d" (ret)
 -                    : "da" (&rw->lock)
 -                    : "cc");
 -      return ret;
 -}
 -
  /*
   * Read locks are a bit more hairy:
   *  - Exclusively load the lock value.
@@@ -209,4 -225,26 +210,4 @@@ static inline int arch_read_trylock(arc
        return tmp;
  }
  
 -/* read_can_lock - would read_trylock() succeed? */
 -static inline int arch_read_can_lock(arch_rwlock_t *rw)
 -{
 -      int tmp;
 -
 -      asm volatile ("LNKGETD  %0, [%1]\n"
 -                    "CMP      %0, %2\n"
 -                    "MOV      %0, #1\n"
 -                    "XORZ     %0, %0, %0\n"
 -                    : "=&d" (tmp)
 -                    : "da" (&rw->lock), "bd" (0x80000000)
 -                    : "cc");
 -      return tmp;
 -}
 -
 -#define       arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 -#define       arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 -
 -#define arch_spin_relax(lock) cpu_relax()
 -#define arch_read_relax(lock) cpu_relax()
 -#define arch_write_relax(lock)        cpu_relax()
 -
  #endif /* __ASM_SPINLOCK_LNKGET_H */
index 8ae12bfc8ad8a7d028ff2af43be245caa6843e14,12de9862d19028c7a88dcc789a730960bd2d7c63..c0bd81bbe18c047b7e61bd82cfb5968f7e8fe4ce
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __ASM_SPINLOCK_LOCK1_H
  #define __ASM_SPINLOCK_LOCK1_H
  
@@@ -104,6 -105,16 +105,6 @@@ static inline void arch_write_unlock(ar
        rw->lock = 0;
  }
  
 -/* write_can_lock - would write_trylock() succeed? */
 -static inline int arch_write_can_lock(arch_rwlock_t *rw)
 -{
 -      unsigned int ret;
 -
 -      barrier();
 -      ret = rw->lock;
 -      return (ret == 0);
 -}
 -
  /*
   * Read locks are a bit more hairy:
   *  - Exclusively load the lock value.
@@@ -161,4 -172,14 +162,4 @@@ static inline int arch_read_trylock(arc
        return (ret < 0x80000000);
  }
  
 -/* read_can_lock - would read_trylock() succeed? */
 -static inline int arch_read_can_lock(arch_rwlock_t *rw)
 -{
 -      unsigned int ret;
 -
 -      barrier();
 -      ret = rw->lock;
 -      return (ret < 0x80000000);
 -}
 -
  #endif /* __ASM_SPINLOCK_LOCK1_H */
index 1d2996cd58daec79fc7d05dbeafb0c5c353d0cd4,9dd624c2fe567e9f4ad41f414704c98e265df1a1..421e06dfee728a973452c11e8b51a128efaae032
@@@ -1,6 -1,6 +1,6 @@@
  /*
   * Copyright (C) 2014 Imagination Technologies
-  * Author: Paul Burton <paul.burton@imgtec.com>
+  * Author: Paul Burton <paul.burton@mips.com>
   *
   * This program is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License as published by the
@@@ -166,7 -166,7 +166,7 @@@ int cps_pm_enter_state(enum cps_pm_stat
        nc_core_ready_count = nc_addr;
  
        /* Ensure ready_count is zero-initialised before the assembly runs */
 -      ACCESS_ONCE(*nc_core_ready_count) = 0;
 +      WRITE_ONCE(*nc_core_ready_count, 0);
        coupled_barrier(&per_cpu(pm_barrier, core), online);
  
        /* Run the generated entry code */
index c57d4e8307f2df7a8c0c00d84d539ab554b032d3,bc54addd589f69daf4fb7aae0c23d47ea5c631a1..88bae6676c9b6ef3823f6a8590882d43b0d83b22
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /* Copyright (C) 2000 Philipp Rumpf <[email protected]>
   * Copyright (C) 2006 Kyle McMartin <[email protected]>
   */
@@@ -260,7 -261,7 +261,7 @@@ atomic64_set(atomic64_t *v, s64 i
  static __inline__ s64
  atomic64_read(const atomic64_t *v)
  {
 -      return ACCESS_ONCE((v)->counter);
 +      return READ_ONCE((v)->counter);
  }
  
  #define atomic64_inc(v)               (atomic64_add(   1,(v)))
index d66d7b1efc4e9eb28317893d61b4b1062be3192f,af03359e6ac5685d6fa93361b5a0f4f4817a8cae..6f84b6acc86ed1e291b70818b7715400e0f7b7e6
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __ASM_SPINLOCK_H
  #define __ASM_SPINLOCK_H
  
@@@ -31,7 -32,6 +32,7 @@@ static inline void arch_spin_lock_flags
                                cpu_relax();
        mb();
  }
 +#define arch_spin_lock_flags arch_spin_lock_flags
  
  static inline void arch_spin_unlock(arch_spinlock_t *x)
  {
@@@ -169,4 -169,25 +170,4 @@@ static __inline__ int arch_write_tryloc
        return result;
  }
  
 -/*
 - * read_can_lock - would read_trylock() succeed?
 - * @lock: the rwlock in question.
 - */
 -static __inline__ int arch_read_can_lock(arch_rwlock_t *rw)
 -{
 -      return rw->counter >= 0;
 -}
 -
 -/*
 - * write_can_lock - would write_trylock() succeed?
 - * @lock: the rwlock in question.
 - */
 -static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
 -{
 -      return !rw->counter;
 -}
 -
 -#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 -#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 -
  #endif /* __ASM_SPINLOCK_H */
index fda9481dec5c1e4c4aa5cb0db366c970e143dbbf,f731b7b518bd1b979ef4da5141119b1bb4dae2a6..3f4198793b79f8c33be41a244148327c2ef4d849
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _S390_RWSEM_H
  #define _S390_RWSEM_H
  
@@@ -49,7 -50,7 +50,7 @@@
  /*
   * lock for reading
   */
 -static inline void __down_read(struct rw_semaphore *sem)
 +static inline int ___down_read(struct rw_semaphore *sem)
  {
        signed long old, new;
  
                : "=&d" (old), "=&d" (new), "=Q" (sem->count)
                : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
                : "cc", "memory");
 -      if (old < 0)
 +      return (old < 0);
 +}
 +
 +static inline void __down_read(struct rw_semaphore *sem)
 +{
 +      if (___down_read(sem))
                rwsem_down_read_failed(sem);
  }
  
 +static inline int __down_read_killable(struct rw_semaphore *sem)
 +{
 +      if (___down_read(sem)) {
 +              if (IS_ERR(rwsem_down_read_failed_killable(sem)))
 +                      return -EINTR;
 +      }
 +
 +      return 0;
 +}
 +
  /*
   * trylock for reading -- returns 1 if successful, 0 if contention
   */
index 66f4160010efe8869da8eb16ce8030dd08598363,f3f5e0155b10721d175f4c4fb0ac80427e665e37..d97175e16d9b48aaca6857ad05433fcfc0edab6f
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /*
   *  S390 version
   *    Copyright IBM Corp. 1999
@@@ -45,7 -46,6 +46,7 @@@ static inline void arch_spin_relax(arch
  {
        arch_lock_relax(lock->lock);
  }
 +#define arch_spin_relax               arch_spin_relax
  
  static inline u32 arch_spin_lockval(int cpu)
  {
@@@ -81,7 -81,6 +82,7 @@@ static inline void arch_spin_lock_flags
        if (!arch_spin_trylock_once(lp))
                arch_spin_lock_wait_flags(lp, flags);
  }
 +#define arch_spin_lock_flags  arch_spin_lock_flags
  
  static inline int arch_spin_trylock(arch_spinlock_t *lp)
  {
@@@ -112,19 -111,34 +113,19 @@@ static inline void arch_spin_unlock(arc
   * read-locks.
   */
  
 -/**
 - * read_can_lock - would read_trylock() succeed?
 - * @lock: the rwlock in question.
 - */
 -#define arch_read_can_lock(x) ((int)(x)->lock >= 0)
 -
 -/**
 - * write_can_lock - would write_trylock() succeed?
 - * @lock: the rwlock in question.
 - */
 -#define arch_write_can_lock(x) ((x)->lock == 0)
 -
  extern int _raw_read_trylock_retry(arch_rwlock_t *lp);
  extern int _raw_write_trylock_retry(arch_rwlock_t *lp);
  
 -#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 -#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 -
  static inline int arch_read_trylock_once(arch_rwlock_t *rw)
  {
 -      int old = ACCESS_ONCE(rw->lock);
 +      int old = READ_ONCE(rw->lock);
        return likely(old >= 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, old, old + 1));
  }
  
  static inline int arch_write_trylock_once(arch_rwlock_t *rw)
  {
 -      int old = ACCESS_ONCE(rw->lock);
 +      int old = READ_ONCE(rw->lock);
        return likely(old == 0 &&
                      __atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000));
  }
@@@ -211,7 -225,7 +212,7 @@@ static inline void arch_read_unlock(arc
        int old;
  
        do {
 -              old = ACCESS_ONCE(rw->lock);
 +              old = READ_ONCE(rw->lock);
        } while (!__atomic_cmpxchg_bool(&rw->lock, old, old - 1));
  }
  
@@@ -255,12 -269,10 +256,12 @@@ static inline void arch_read_relax(arch
  {
        arch_lock_relax(rw->owner);
  }
 +#define arch_read_relax               arch_read_relax
  
  static inline void arch_write_relax(arch_rwlock_t *rw)
  {
        arch_lock_relax(rw->owner);
  }
 +#define arch_write_relax      arch_write_relax
  
  #endif /* __ASM_SPINLOCK_H */
diff --combined arch/s390/lib/spinlock.c
index 34e30b9ea234e4ad8fbdc5ff9a393934391538db,1dc85f552f4817fb7187b800155db64bfe958789..2d0af866769566125a0e08cb70a495bb7f003961
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   *    Out of line spinlock code.
   *
@@@ -162,8 -163,8 +163,8 @@@ void _raw_read_lock_wait(arch_rwlock_t 
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
 -              old = ACCESS_ONCE(rw->lock);
 -              owner = ACCESS_ONCE(rw->owner);
 +              old = READ_ONCE(rw->lock);
 +              owner = READ_ONCE(rw->owner);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@@ -178,7 -179,7 +179,7 @@@ int _raw_read_trylock_retry(arch_rwlock
        int old;
  
        while (count-- > 0) {
 -              old = ACCESS_ONCE(rw->lock);
 +              old = READ_ONCE(rw->lock);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
@@@ -202,8 -203,8 +203,8 @@@ void _raw_write_lock_wait(arch_rwlock_
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
 -              old = ACCESS_ONCE(rw->lock);
 -              owner = ACCESS_ONCE(rw->owner);
 +              old = READ_ONCE(rw->lock);
 +              owner = READ_ONCE(rw->owner);
                smp_mb();
                if (old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
@@@ -230,8 -231,8 +231,8 @@@ void _raw_write_lock_wait(arch_rwlock_
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
 -              old = ACCESS_ONCE(rw->lock);
 -              owner = ACCESS_ONCE(rw->owner);
 +              old = READ_ONCE(rw->lock);
 +              owner = READ_ONCE(rw->owner);
                if (old >= 0 &&
                    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
                        prev = old;
@@@ -251,7 -252,7 +252,7 @@@ int _raw_write_trylock_retry(arch_rwloc
        int old;
  
        while (count-- > 0) {
 -              old = ACCESS_ONCE(rw->lock);
 +              old = READ_ONCE(rw->lock);
                if (old)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
index e2f398e9456cca29f2be5dface07a93372f9e97d,0c3b3b4a99633e7d3500c19c07393cc1b4633cdc..d13ce517f4b9946382579a7a4d5e9bbccdb6332e
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /* atomic.h: These still suck, but the I-cache hit rate is higher.
   *
   * Copyright (C) 1996 David S. Miller ([email protected])
@@@ -31,7 -32,7 +32,7 @@@ void atomic_set(atomic_t *, int)
  
  #define atomic_set_release(v, i)      atomic_set((v), (i))
  
 -#define atomic_read(v)          ACCESS_ONCE((v)->counter)
 +#define atomic_read(v)          READ_ONCE((v)->counter)
  
  #define atomic_add(i, v)      ((void)atomic_add_return( (int)(i), (v)))
  #define atomic_sub(i, v)      ((void)atomic_add_return(-(int)(i), (v)))
index b383484edcd36a06863fe5718b5d8c8e574fade7,6a339a78f4f42b442bfd40b8353356223ee831cb..71dd82b43cc57d84847a87bb315b6c2e19cda4e5
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __SPARC_PTRACE_H
  #define __SPARC_PTRACE_H
  
@@@ -6,7 -7,6 +7,7 @@@
  #if defined(__sparc__) && defined(__arch64__)
  #ifndef __ASSEMBLY__
  
 +#include <linux/compiler.h>
  #include <linux/threads.h>
  #include <asm/switch_to.h>
  
index 12bf857b471e1108309fb8d80f5cae85991c65f1,26f00ac2b4700a41ba525cc2568367fb84813b23..bc5aa6f61676430890feec03b296725afe664893
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /* spinlock.h: 32-bit Sparc spinlock support.
   *
   * Copyright (C) 1997 David S. Miller ([email protected])
@@@ -182,6 -183,17 +183,6 @@@ static inline int __arch_read_trylock(a
        res; \
  })
  
 -#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
 -#define arch_read_lock_flags(rw, flags)   arch_read_lock(rw)
 -#define arch_write_lock_flags(rw, flags)  arch_write_lock(rw)
 -
 -#define arch_spin_relax(lock) cpu_relax()
 -#define arch_read_relax(lock) cpu_relax()
 -#define arch_write_relax(lock)        cpu_relax()
 -
 -#define arch_read_can_lock(rw) (!((rw)->lock & 0xff))
 -#define arch_write_can_lock(rw) (!(rw)->lock)
 -
  #endif /* !(__ASSEMBLY__) */
  
  #endif /* __SPARC_SPINLOCK_H */
index 99b6e1c4f6300c1cf8fbf2a39969d87735c1a8ec,4822a7e94a30b839a09cf8a9bf4b2263611ff6fd..7fc82a233f4957c97bf8f0913a43bacd04a7eefc
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /* spinlock.h: 64-bit Sparc spinlock support.
   *
   * Copyright (C) 1997 David S. Miller ([email protected])
  #include <asm/qrwlock.h>
  #include <asm/qspinlock.h>
  
 -#define arch_read_lock_flags(p, f) arch_read_lock(p)
 -#define arch_write_lock_flags(p, f) arch_write_lock(p)
 -
 -#define arch_spin_relax(lock) cpu_relax()
 -#define arch_read_relax(lock) cpu_relax()
 -#define arch_write_relax(lock)        cpu_relax()
 -
  #endif /* !(__ASSEMBLY__) */
  
  #endif /* !(__SPARC64_SPINLOCK_H) */
index 094e96ce653b7fa4a6bebf4d12a5a1674f8b7819,390572daa40de1605f3f3ff65f8881fa0bfe235b..b3f5865a92c911b8ad82ff8ce1d4ec143d175296
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_UML_INIT_H
  #define _LINUX_UML_INIT_H
  
@@@ -40,7 -41,7 +41,7 @@@
  typedef int (*initcall_t)(void);
  typedef void (*exitcall_t)(void);
  
 -#include <linux/compiler.h>
 +#include <linux/compiler_types.h>
  
  /* These are for everybody (although not all archs will actually
     discard it in modules) */
diff --combined arch/x86/Kconfig
index 90535646b83d83c06e0278fd6103c4c0582c9e78,2fdb23313dd55fa2d08fee3e15c47bde6ef632ac..9bceea6a5852ed100112d5cb2710d93459ca821b
@@@ -1,3 -1,4 +1,4 @@@
+ # SPDX-License-Identifier: GPL-2.0
  # Select 32 or 64 bit
  config 64BIT
        bool "64-bit kernel" if ARCH = "x86"
@@@ -55,7 -56,7 +56,7 @@@ config X8
        select ARCH_HAS_KCOV                    if X86_64
        select ARCH_HAS_PMEM_API                if X86_64
        # Causing hangs/crashes, see the commit that added this change for details.
 -      select ARCH_HAS_REFCOUNT                if BROKEN
 +      select ARCH_HAS_REFCOUNT
        select ARCH_HAS_UACCESS_FLUSHCACHE      if X86_64
        select ARCH_HAS_SET_MEMORY
        select ARCH_HAS_SG_CHAIN
index efc530642f7d5d88d481fc507a7ff213e4581ec5,6699fc441644197608290d18659bc8d389b501c4..6d16d15d09a0daed96a1e3d670b6203d1779b98e
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_MMU_CONTEXT_H
  #define _ASM_X86_MMU_CONTEXT_H
  
@@@ -72,8 -73,8 +73,8 @@@ static inline void load_mm_ldt(struct m
  #ifdef CONFIG_MODIFY_LDT_SYSCALL
        struct ldt_struct *ldt;
  
 -      /* lockless_dereference synchronizes with smp_store_release */
 -      ldt = lockless_dereference(mm->context.ldt);
 +      /* READ_ONCE synchronizes with smp_store_release */
 +      ldt = READ_ONCE(mm->context.ldt);
  
        /*
         * Any change to mm->context.ldt is followed by an IPI to all
index 308dfd0714c713334bc8a6c432e84f31b927573a,9982dd96f093c3e924447c9750274d922381f2af..5e16b5d40d32b7667fb49f8c697f2b2443484151
@@@ -1,7 -1,7 +1,8 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_QSPINLOCK_H
  #define _ASM_X86_QSPINLOCK_H
  
 +#include <linux/jump_label.h>
  #include <asm/cpufeature.h>
  #include <asm-generic/qspinlock_types.h>
  #include <asm/paravirt.h>
@@@ -47,14 -47,10 +48,14 @@@ static inline void queued_spin_unlock(s
  #endif
  
  #ifdef CONFIG_PARAVIRT
 +DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
 +
 +void native_pv_lock_init(void) __init;
 +
  #define virt_spin_lock virt_spin_lock
  static inline bool virt_spin_lock(struct qspinlock *lock)
  {
 -      if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
 +      if (!static_branch_likely(&virt_spin_lock_key))
                return false;
  
        /*
  
        return true;
  }
 +#else
 +static inline void native_pv_lock_init(void)
 +{
 +}
  #endif /* CONFIG_PARAVIRT */
  
  #include <asm-generic/qspinlock.h>
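Switching from static_cpu_has(X86_FEATURE_HYPERVISOR) to a static key lets a paravirt-aware guest turn the virt_spin_lock() fallback off at boot instead of deciding on the CPU feature alone. The key defaults to true; native_pv_lock_init() disables it on bare metal, and the Xen hunk further down disables it when pvspinlocks are off. A sketch of the definition side, assuming the arch/x86/kernel/paravirt.c half of this series:

    DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

    void __init native_pv_lock_init(void)
    {
            /* Bare metal: patch the branch away entirely. */
            if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
                    static_branch_disable(&virt_spin_lock_key);
    }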
index 1e51195c0b63fc66db39659cab67db28918eae51,4d38d85a16ada01ef65671cb47c220d268fa69a0..4c25cf6caefa1b64e732a6c1d5d7a6636ebd6a62
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /* rwsem.h: R/W semaphores implemented using XADD/CMPXCHG for i486+
   *
   * Written by David Howells ([email protected]).
  /*
   * lock for reading
   */
 +#define ____down_read(sem, slow_path)                                 \
 +({                                                                    \
 +      struct rw_semaphore* ret;                                       \
 +      asm volatile("# beginning down_read\n\t"                        \
 +                   LOCK_PREFIX _ASM_INC "(%[sem])\n\t"                \
 +                   /* adds 0x00000001 */                              \
 +                   "  jns        1f\n"                                \
 +                   "  call " slow_path "\n"                           \
 +                   "1:\n\t"                                           \
 +                   "# ending down_read\n\t"                           \
 +                   : "+m" (sem->count), "=a" (ret),                   \
 +                      ASM_CALL_CONSTRAINT                             \
 +                   : [sem] "a" (sem)                                  \
 +                   : "memory", "cc");                                 \
 +      ret;                                                            \
 +})
 +
  static inline void __down_read(struct rw_semaphore *sem)
  {
 -      asm volatile("# beginning down_read\n\t"
 -                   LOCK_PREFIX _ASM_INC "(%1)\n\t"
 -                   /* adds 0x00000001 */
 -                   "  jns        1f\n"
 -                   "  call call_rwsem_down_read_failed\n"
 -                   "1:\n\t"
 -                   "# ending down_read\n\t"
 -                   : "+m" (sem->count)
 -                   : "a" (sem)
 -                   : "memory", "cc");
 +      ____down_read(sem, "call_rwsem_down_read_failed");
 +}
 +
 +static inline int __down_read_killable(struct rw_semaphore *sem)
 +{
 +      if (IS_ERR(____down_read(sem, "call_rwsem_down_read_failed_killable")))
 +              return -EINTR;
 +      return 0;
  }
  
  /*
@@@ -96,18 -82,17 +97,18 @@@ static inline bool __down_read_trylock(
  {
        long result, tmp;
        asm volatile("# beginning __down_read_trylock\n\t"
 -                   "  mov          %0,%1\n\t"
 +                   "  mov          %[count],%[result]\n\t"
                     "1:\n\t"
 -                   "  mov          %1,%2\n\t"
 -                   "  add          %3,%2\n\t"
 +                   "  mov          %[result],%[tmp]\n\t"
 +                   "  add          %[inc],%[tmp]\n\t"
                     "  jle          2f\n\t"
 -                   LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
 +                   LOCK_PREFIX "  cmpxchg  %[tmp],%[count]\n\t"
                     "  jnz          1b\n\t"
                     "2:\n\t"
                     "# ending __down_read_trylock\n\t"
 -                   : "+m" (sem->count), "=&a" (result), "=&r" (tmp)
 -                   : "i" (RWSEM_ACTIVE_READ_BIAS)
 +                   : [count] "+m" (sem->count), [result] "=&a" (result),
 +                     [tmp] "=&r" (tmp)
 +                   : [inc] "i" (RWSEM_ACTIVE_READ_BIAS)
                     : "memory", "cc");
        return result >= 0;
  }
        struct rw_semaphore* ret;                       \
                                                        \
        asm volatile("# beginning down_write\n\t"       \
 -                   LOCK_PREFIX "  xadd      %1,(%4)\n\t"      \
 +                   LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"      \
                     /* adds 0xffff0001, returns the old value */ \
                     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t" \
                     /* was the active mask 0 before? */\
                     "  call " slow_path "\n"           \
                     "1:\n"                             \
                     "# ending down_write"              \
 -                   : "+m" (sem->count), "=d" (tmp),   \
 +                   : "+m" (sem->count), [tmp] "=d" (tmp),     \
                       "=a" (ret), ASM_CALL_CONSTRAINT  \
 -                   : "a" (sem), "1" (RWSEM_ACTIVE_WRITE_BIAS) \
 +                   : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS) \
                     : "memory", "cc");                 \
        ret;                                            \
  })
@@@ -157,21 -142,21 +158,21 @@@ static inline bool __down_write_trylock
        bool result;
        long tmp0, tmp1;
        asm volatile("# beginning __down_write_trylock\n\t"
 -                   "  mov          %0,%1\n\t"
 +                   "  mov          %[count],%[tmp0]\n\t"
                     "1:\n\t"
                     "  test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
                     /* was the active mask 0 before? */
                     "  jnz          2f\n\t"
 -                   "  mov          %1,%2\n\t"
 -                   "  add          %4,%2\n\t"
 -                   LOCK_PREFIX "  cmpxchg  %2,%0\n\t"
 +                   "  mov          %[tmp0],%[tmp1]\n\t"
 +                   "  add          %[inc],%[tmp1]\n\t"
 +                   LOCK_PREFIX "  cmpxchg  %[tmp1],%[count]\n\t"
                     "  jnz          1b\n\t"
                     "2:\n\t"
                     CC_SET(e)
                     "# ending __down_write_trylock\n\t"
 -                   : "+m" (sem->count), "=&a" (tmp0), "=&r" (tmp1),
 -                     CC_OUT(e) (result)
 -                   : "er" (RWSEM_ACTIVE_WRITE_BIAS)
 +                   : [count] "+m" (sem->count), [tmp0] "=&a" (tmp0),
 +                     [tmp1] "=&r" (tmp1), CC_OUT(e) (result)
 +                   : [inc] "er" (RWSEM_ACTIVE_WRITE_BIAS)
                     : "memory");
        return result;
  }
@@@ -183,14 -168,14 +184,14 @@@ static inline void __up_read(struct rw_
  {
        long tmp;
        asm volatile("# beginning __up_read\n\t"
 -                   LOCK_PREFIX "  xadd      %1,(%2)\n\t"
 +                   LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"
                     /* subtracts 1, returns the old value */
                     "  jns        1f\n\t"
                     "  call call_rwsem_wake\n" /* expects old value in %edx */
                     "1:\n"
                     "# ending __up_read\n"
 -                   : "+m" (sem->count), "=d" (tmp)
 -                   : "a" (sem), "1" (-RWSEM_ACTIVE_READ_BIAS)
 +                   : "+m" (sem->count), [tmp] "=d" (tmp)
 +                   : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_READ_BIAS)
                     : "memory", "cc");
  }
  
@@@ -201,14 -186,14 +202,14 @@@ static inline void __up_write(struct rw
  {
        long tmp;
        asm volatile("# beginning __up_write\n\t"
 -                   LOCK_PREFIX "  xadd      %1,(%2)\n\t"
 +                   LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"
                     /* subtracts 0xffff0001, returns the old value */
                     "  jns        1f\n\t"
                     "  call call_rwsem_wake\n" /* expects old value in %edx */
                     "1:\n\t"
                     "# ending __up_write\n"
 -                   : "+m" (sem->count), "=d" (tmp)
 -                   : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
 +                   : "+m" (sem->count), [tmp] "=d" (tmp)
 +                   : [sem] "a" (sem), "[tmp]" (-RWSEM_ACTIVE_WRITE_BIAS)
                     : "memory", "cc");
  }
  
  static inline void __downgrade_write(struct rw_semaphore *sem)
  {
        asm volatile("# beginning __downgrade_write\n\t"
 -                   LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
 +                   LOCK_PREFIX _ASM_ADD "%[inc],(%[sem])\n\t"
                     /*
                      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
                      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
                     "1:\n\t"
                     "# ending __downgrade_write\n"
                     : "+m" (sem->count)
 -                   : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
 +                   : [sem] "a" (sem), [inc] "er" (-RWSEM_WAITING_BIAS)
                     : "memory", "cc");
  }
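The rwsem asm above also converts positional operands (%1, "1") to named ones (%[tmp], "[tmp]"). Named operands generate identical code; their value is that constraints can no longer silently pair with the wrong operand when one is inserted or removed. The idiom in isolation:

    /* "[tmp]" as an input ties it to the register of the output named
     * tmp -- the named equivalent of the old positional "1". */
    asm volatile(LOCK_PREFIX "  xadd      %[tmp],(%[sem])\n\t"
                 : "+m" (sem->count), [tmp] "=d" (tmp)
                 : [sem] "a" (sem), "[tmp]" (RWSEM_ACTIVE_WRITE_BIAS)
                 : "memory", "cc");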
  
index c6a6adf0a5c5a1c23d09e198e650b79981328bde,b34625796eb2cf1f6d067a13e2a1f128e2fbb376..5b6bc7016c223e7496fc2645ae8b3434ef83b96b
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_SPINLOCK_H
  #define _ASM_X86_SPINLOCK_H
  
  
  #include <asm/qrwlock.h>
  
 -#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 -#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 -
 -#define arch_spin_relax(lock) cpu_relax()
 -#define arch_read_relax(lock) cpu_relax()
 -#define arch_write_relax(lock)        cpu_relax()
 -
  #endif /* _ASM_X86_SPINLOCK_H */
index 53dd162576a8b8bb0780b480e8c53371e48e0ddd,52250681f68c7410b30f42d9e424553aaed4f313..fb856c9f04494b6633d9ea8fd3ebb9204cc2c620
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_VGTOD_H
  #define _ASM_X86_VGTOD_H
  
@@@ -48,7 -49,7 +49,7 @@@ static inline unsigned gtod_read_begin(
        unsigned ret;
  
  repeat:
 -      ret = ACCESS_ONCE(s->seq);
 +      ret = READ_ONCE(s->seq);
        if (unlikely(ret & 1)) {
                cpu_relax();
                goto repeat;
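gtod_read_begin() is the reader half of a seqcount: spin while the sequence is odd (a writer is mid-update), then re-check after the reads. A hedged sketch of a full reader loop, assuming the companion gtod_read_retry() helper from the same header and an illustrative field name:

    unsigned seq;
    u64 snap;

    do {
            seq  = gtod_read_begin(gtod);   /* waits out odd sequences */
            snap = gtod->wall_time_snsec;   /* illustrative field read */
    } while (unlikely(gtod_read_retry(gtod, seq)));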
diff --combined arch/x86/kernel/ldt.c
index 0a21390642c4c079805edf3b911c9916f01797e5,4d17bacf40308cbc9e0414b1b60d038f7a340f0a..a2fcf037bd80cfe6afa22addc5dc1809b56a62b0
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
   * Copyright (C) 1999 Ingo Molnar <[email protected]>
@@@ -101,7 -102,7 +102,7 @@@ static void finalize_ldt_struct(struct 
  static void install_ldt(struct mm_struct *current_mm,
                        struct ldt_struct *ldt)
  {
 -      /* Synchronizes with lockless_dereference in load_mm_ldt. */
 +      /* Synchronizes with READ_ONCE in load_mm_ldt. */
        smp_store_release(&current_mm->context.ldt, ldt);
  
        /* Activate the LDT for all CPUs using current_mm. */
diff --combined arch/x86/xen/spinlock.c
index 1e1462d3d43e3f2c03c636f342c42049464bda25,08324c64005dba48d79ee8914a198562e582e0d8..02f3445a2b5f60b5cf8fd1e296bba3a55fe37068
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * Split spinlock implementation out into its own file, so it can be
   * compiled in a FTRACE-compatible way.
@@@ -10,7 -11,6 +11,7 @@@
  #include <linux/slab.h>
  
  #include <asm/paravirt.h>
 +#include <asm/qspinlock.h>
  
  #include <xen/interface/xen.h>
  #include <xen/events.h>
@@@ -81,11 -81,8 +82,11 @@@ void xen_init_lock_cpu(int cpu
        int irq;
        char *name;
  
 -      if (!xen_pvspin)
 +      if (!xen_pvspin) {
 +              if (cpu == 0)
 +                      static_branch_disable(&virt_spin_lock_key);
                return;
 +      }
  
        WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n",
             cpu, per_cpu(lock_kicker_irq, cpu));
diff --combined drivers/md/dm-stats.c
index a1a5eec783ccc416b44c962a6eeaf26e0b70711e,a7868503d1352dfee4927ceeac246d39160685a5..29bc51084c82be8527e65c9cd157c54b86a708cd
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  #include <linux/errno.h>
  #include <linux/numa.h>
  #include <linux/slab.h>
@@@ -431,7 -432,7 +432,7 @@@ do_sync_free
                synchronize_rcu_expedited();
                dm_stat_free(&s->rcu_head);
        } else {
 -              ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
 +              WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
                call_rcu(&s->rcu_head, dm_stat_free);
        }
        return 0;
@@@ -639,12 -640,12 +640,12 @@@ void dm_stats_account_io(struct dm_stat
                 */
                last = raw_cpu_ptr(stats->last);
                stats_aux->merged =
 -                      (bi_sector == (ACCESS_ONCE(last->last_sector) &&
 +                      (bi_sector == (READ_ONCE(last->last_sector) &&
                                       ((bi_rw == WRITE) ==
 -                                      (ACCESS_ONCE(last->last_rw) == WRITE))
 +                                      (READ_ONCE(last->last_rw) == WRITE))
                                       ));
 -              ACCESS_ONCE(last->last_sector) = end_sector;
 -              ACCESS_ONCE(last->last_rw) = bi_rw;
 +              WRITE_ONCE(last->last_sector, end_sector);
 +              WRITE_ONCE(last->last_rw, bi_rw);
        }
  
        rcu_read_lock();
@@@ -693,22 -694,22 +694,22 @@@ static void __dm_stat_init_temporary_pe
  
        for_each_possible_cpu(cpu) {
                p = &s->stat_percpu[cpu][x];
 -              shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]);
 -              shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]);
 -              shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]);
 -              shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]);
 -              shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]);
 -              shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]);
 -              shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]);
 -              shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]);
 -              shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]);
 -              shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]);
 -              shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total);
 -              shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue);
 +              shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
 +              shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
 +              shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
 +              shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
 +              shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
 +              shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
 +              shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
 +              shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
 +              shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
 +              shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
 +              shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
 +              shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
                if (s->n_histogram_entries) {
                        unsigned i;
                        for (i = 0; i < s->n_histogram_entries + 1; i++)
 -                              shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]);
 +                              shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
                }
        }
  }
index 6bccc2be2b918f48f92dd4d70677c78caab8a883,ea69af267d63522ae7f545c2cbb8a7a6e601adbc..18b6c25d4705b9ca12918c5369a7a9e02cf35634
@@@ -750,7 -750,7 +750,7 @@@ static void igb_cache_ring_register(str
  u32 igb_rd32(struct e1000_hw *hw, u32 reg)
  {
        struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
 -      u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr);
 +      u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
        u32 value = 0;
  
        if (E1000_REMOVED(hw_addr))
@@@ -5326,7 -5326,7 +5326,7 @@@ dma_error
                                       DMA_TO_DEVICE);
                dma_unmap_len_set(tx_buffer, len, 0);
  
-               if (i--)
+               if (i-- == 0)
                        i += tx_ring->count;
                tx_buffer = &tx_ring->tx_buffer_info[i];
        }
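The igb hunk above fixes an inverted wrap test in the DMA-unmap walk-back. The index should wrap only when it underflows, but "if (i--)" is true whenever i was nonzero, so the old code wrapped at exactly the wrong times and could unmap the wrong buffers. After the fix:

    /* Walk the ring backwards; wrap only when i was 0 before decrement. */
    if (i-- == 0)
            i += tx_ring->count;    /* i is now count - 1 */
    tx_buffer = &tx_ring->tx_buffer_info[i];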
index 2224e691ee07c43ac1177166412ef41603c3f86b,6d5f31e943583df77f5fa1e6aa3e2703fff33518..935a2f15b0b00e72763214aad0248a504f5b3a45
@@@ -380,7 -380,7 +380,7 @@@ static void ixgbe_check_remove(struct i
   */
  u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
  {
 -      u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
 +      u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr);
        u32 value;
  
        if (ixgbe_removed(reg_addr))
@@@ -8020,29 -8020,23 +8020,23 @@@ static int ixgbe_tx_map(struct ixgbe_ri
        return 0;
  dma_error:
        dev_err(tx_ring->dev, "TX DMA map failed\n");
-       tx_buffer = &tx_ring->tx_buffer_info[i];
  
        /* clear dma mappings for failed tx_buffer_info map */
-       while (tx_buffer != first) {
+       for (;;) {
+               tx_buffer = &tx_ring->tx_buffer_info[i];
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_page(tx_ring->dev,
                                       dma_unmap_addr(tx_buffer, dma),
                                       dma_unmap_len(tx_buffer, len),
                                       DMA_TO_DEVICE);
                dma_unmap_len_set(tx_buffer, len, 0);
-               if (i--)
+               if (tx_buffer == first)
+                       break;
+               if (i == 0)
                        i += tx_ring->count;
-               tx_buffer = &tx_ring->tx_buffer_info[i];
+               i--;
        }
  
-       if (dma_unmap_len(tx_buffer, len))
-               dma_unmap_single(tx_ring->dev,
-                                dma_unmap_addr(tx_buffer, dma),
-                                dma_unmap_len(tx_buffer, len),
-                                DMA_TO_DEVICE);
-       dma_unmap_len_set(tx_buffer, len, 0);
        dev_kfree_skb_any(first->skb);
        first->skb = NULL;
  
@@@ -8630,7 -8624,7 +8624,7 @@@ static void ixgbe_get_stats64(struct ne
  
        rcu_read_lock();
        for (i = 0; i < adapter->num_rx_queues; i++) {
 -              struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
 +              struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]);
                u64 bytes, packets;
                unsigned int start;
  
        }
  
        for (i = 0; i < adapter->num_tx_queues; i++) {
 -              struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]);
 +              struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]);
  
                ixgbe_get_ring_stats64(stats, ring);
        }
        for (i = 0; i < adapter->num_xdp_queues; i++) {
 -              struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]);
 +              struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]);
  
                ixgbe_get_ring_stats64(stats, ring);
        }
diff --combined drivers/net/tap.c
index b55b29b90b88e3fdd0857ee1c1863ebd7ab3f019,6c0c84c33e1fb62f259881de910934470b6a8929..b13890953ebb92515b3924f511714942a912b120
@@@ -257,7 -257,7 +257,7 @@@ static struct tap_queue *tap_get_queue(
         * and validate that the result isn't NULL - in case we are
         * racing against queue removal.
         */
 -      int numvtaps = ACCESS_ONCE(tap->numvtaps);
 +      int numvtaps = READ_ONCE(tap->numvtaps);
        __u32 rxq;
  
        if (!numvtaps)
@@@ -517,6 -517,10 +517,10 @@@ static int tap_open(struct inode *inode
                                             &tap_proto, 0);
        if (!q)
                goto err;
+       if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) {
+               sk_free(&q->sk);
+               goto err;
+       }
  
        RCU_INIT_POINTER(q->sock.wq, &q->wq);
        init_waitqueue_head(&q->wq.wait);
        if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG))
                sock_set_flag(&q->sk, SOCK_ZEROCOPY);
  
-       err = -ENOMEM;
-       if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL))
-               goto err_array;
        err = tap_set_queue(tap, file, q);
-       if (err)
-               goto err_queue;
+       if (err) {
+               /* tap_sock_destruct() will take care of freeing skb_array */
+               goto err_put;
+       }
  
        dev_put(tap->dev);
  
        rtnl_unlock();
        return err;
  
- err_queue:
-       skb_array_cleanup(&q->skb_array);
- err_array:
+ err_put:
        sock_put(&q->sk);
  err:
        if (tap)
@@@ -1032,6 -1032,8 +1032,8 @@@ static long tap_ioctl(struct file *file
        case TUNSETSNDBUF:
                if (get_user(s, sp))
                        return -EFAULT;
+               if (s <= 0)
+                       return -EINVAL;
  
                q->sk.sk_sndbuf = s;
                return 0;
@@@ -1249,8 -1251,8 +1251,8 @@@ static int tap_list_add(dev_t major, co
        return 0;
  }
  
- int tap_create_cdev(struct cdev *tap_cdev,
-                   dev_t *tap_major, const char *device_name)
+ int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major,
+                   const char *device_name, struct module *module)
  {
        int err;
  
                goto out1;
  
        cdev_init(tap_cdev, &tap_fops);
+       tap_cdev->owner = module;
        err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS);
        if (err)
                goto out2;
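Threading a struct module through tap_create_cdev() and storing it in tap_cdev->owner makes open file descriptors pin the owning module, so it can no longer be unloaded while queues are live. Callers now pass themselves; a sketch of a call site, with names assumed from the macvtap driver:

    err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap",
                          THIS_MODULE);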
diff --combined drivers/net/tun.c
index 27cd50c5bc9eac1470f03269082a3dc69a071f95,42bb820a56c92e812d93e66ae2e87a9e0648f001..c1685a6d788360beb3a1a0b8cf3a01efe157618d
@@@ -469,7 -469,7 +469,7 @@@ static u16 tun_select_queue(struct net_
        u32 numqueues = 0;
  
        rcu_read_lock();
 -      numqueues = ACCESS_ONCE(tun->numqueues);
 +      numqueues = READ_ONCE(tun->numqueues);
  
        txq = __skb_get_hash_symmetric(skb);
        if (txq) {
@@@ -864,7 -864,7 +864,7 @@@ static netdev_tx_t tun_net_xmit(struct 
  
        rcu_read_lock();
        tfile = rcu_dereference(tun->tfiles[txq]);
 -      numqueues = ACCESS_ONCE(tun->numqueues);
 +      numqueues = READ_ONCE(tun->numqueues);
  
        /* Drop packet if interface is not attached */
        if (txq >= numqueues)
@@@ -1286,6 -1286,7 +1286,7 @@@ static struct sk_buff *tun_build_skb(st
        buflen += SKB_DATA_ALIGN(len + pad);
        rcu_read_unlock();
  
+       alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
        if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
                return ERR_PTR(-ENOMEM);
  
@@@ -2028,7 -2029,7 +2029,7 @@@ static int tun_set_iff(struct net *net
                if (!dev)
                        return -ENOMEM;
                err = dev_get_valid_name(net, dev, name);
-               if (err)
+               if (err < 0)
                        goto err_free_dev;
  
                dev_net_set(dev, net);
@@@ -2428,6 -2429,10 +2429,10 @@@ static long __tun_chr_ioctl(struct fil
                        ret = -EFAULT;
                        break;
                }
+               if (sndbuf <= 0) {
+                       ret = -EINVAL;
+                       break;
+               }
  
                tun->sndbuf = sndbuf;
                tun_set_sndbuf(tun);
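
The sndbuf <= 0 checks added in both the tap and tun hunks above exist because sk_sndbuf is a signed int that feeds later size arithmetic; a zero or negative value supplied through the ioctl is never meaningful and can misbehave further down the transmit path. The guard in isolation (a sketch, set_user_size() is made up):

	static int set_user_size(int __user *up, int *dst)
	{
		int v;

		if (get_user(v, up))
			return -EFAULT;
		if (v <= 0)		/* signed: reject zero and negative */
			return -EINVAL;
		*dst = v;
		return 0;
	}
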
diff --combined drivers/usb/host/uhci-hcd.h
index 09a2a259941b33b522642e2012420f6b8e66aabf,d97f0d9b3ce6ce0ec29ad1d79531082c0e392d9f..f1cc47292a59e9a3887ec7338dbdabf327773d1b
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __LINUX_UHCI_HCD_H
  #define __LINUX_UHCI_HCD_H
  
@@@ -186,7 -187,7 +187,7 @@@ struct uhci_qh 
   * We need a special accessor for the element pointer because it is
   * subject to asynchronous updates by the controller.
   */
 -#define qh_element(qh)                ACCESS_ONCE((qh)->element)
 +#define qh_element(qh)                READ_ONCE((qh)->element)
  
  #define LINK_TO_QH(uhci, qh)  (UHCI_PTR_QH((uhci)) | \
                                cpu_to_hc32((uhci), (qh)->dma_handle))
@@@ -274,7 -275,7 +275,7 @@@ struct uhci_td 
   * subject to asynchronous updates by the controller.
   */
  #define td_status(uhci, td)           hc32_to_cpu((uhci), \
 -                                              ACCESS_ONCE((td)->status))
 +                                              READ_ONCE((td)->status))
  
  #define LINK_TO_TD(uhci, td)          (cpu_to_hc32((uhci), (td)->dma_handle))
  
diff --combined fs/crypto/keyinfo.c
index 0083bd4fcaa5a3dfc4392ab54f0b2a742f3043c2,a38630214058214dec6c30eca64f74f3f7f693df..577dfaf0367f66e137442b3932210d806d832da3
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * key management facility for FS encryption support.
   *
@@@ -373,7 -374,7 +374,7 @@@ void fscrypt_put_encryption_info(struc
        struct fscrypt_info *prev;
  
        if (ci == NULL)
 -              ci = ACCESS_ONCE(inode->i_crypt_info);
 +              ci = READ_ONCE(inode->i_crypt_info);
        if (ci == NULL)
                return;
  
diff --combined fs/fcntl.c
index 57bf2964bb831cb1fd67331ccc5f314b0876bf89,8d78ffd7b399d430f05fdb8b72b68dfa1bda3bcb..30f47d0f74a00985149ee8123f34531643232711
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   *  linux/fs/fcntl.c
   *
@@@ -724,7 -725,7 +725,7 @@@ static void send_sigio_to_task(struct t
         * F_SETSIG can change ->signum lockless in parallel, make
         * sure we read it once and use the same value throughout.
         */
 -      int signum = ACCESS_ONCE(fown->signum);
 +      int signum = READ_ONCE(fown->signum);
  
        if (!sigio_perm(p, fown, signum))
                return;
diff --combined fs/fs_pin.c
index 2d07f292b62567c952e39e3a79113ab45824882b,0d285fd5b44ab6d16d2377f3a19ef7fb6cc06384..a6497cf8ae53aa2bc7dcc2996887fcb92f1e9820
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  #include <linux/fs.h>
  #include <linux/sched.h>
  #include <linux/slab.h>
@@@ -78,7 -79,7 +79,7 @@@ void mnt_pin_kill(struct mount *m
        while (1) {
                struct hlist_node *p;
                rcu_read_lock();
 -              p = ACCESS_ONCE(m->mnt_pins.first);
 +              p = READ_ONCE(m->mnt_pins.first);
                if (!p) {
                        rcu_read_unlock();
                        break;
@@@ -92,7 -93,7 +93,7 @@@ void group_pin_kill(struct hlist_head *
        while (1) {
                struct hlist_node *q;
                rcu_read_lock();
 -              q = ACCESS_ONCE(p->first);
 +              q = READ_ONCE(p->first);
                if (!q) {
                        rcu_read_unlock();
                        break;
diff --combined fs/namei.c
index 40a0f34bf990b89dad2c380b57470fef57eb3236,ed8b9488a890c2b936e249ab16e6a44a292a9521..5424b10cfdc4657dfa7f487f06d98094e7e58158
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   *  linux/fs/namei.c
   *
@@@ -1209,7 -1210,7 +1210,7 @@@ static int follow_managed(struct path *
        /* Given that we're not holding a lock here, we retain the value in a
         * local variable for each dentry as we look at it so that we don't see
         * the components of that value change under us */
 -      while (managed = ACCESS_ONCE(path->dentry->d_flags),
 +      while (managed = READ_ONCE(path->dentry->d_flags),
               managed &= DCACHE_MANAGED_DENTRY,
               unlikely(managed != 0)) {
                /* Allow the filesystem to manage the transit without i_mutex
@@@ -1394,7 -1395,7 +1395,7 @@@ int follow_down(struct path *path
        unsigned managed;
        int ret;
  
 -      while (managed = ACCESS_ONCE(path->dentry->d_flags),
 +      while (managed = READ_ONCE(path->dentry->d_flags),
               unlikely(managed & DCACHE_MANAGED_DENTRY)) {
                /* Allow the filesystem to manage the transit without i_mutex
                 * being held.
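
follow_managed() and follow_down() both use the comma-expression loop converted above: re-sample d_flags with READ_ONCE() on every pass, because nothing is locked and the flags may change between iterations. The shape of the pattern, with hypothetical names (struct obj, FLAG_MANAGED, handle_transit):

	#include <linux/compiler.h>

	struct obj { unsigned int flags; };
	extern void handle_transit(struct obj *o, unsigned int flags);

	static void drain_managed(struct obj *o)
	{
		unsigned int flags;

		while (flags = READ_ONCE(o->flags),	/* fresh snapshot */
		       unlikely(flags & FLAG_MANAGED))
			handle_transit(o, flags);	/* may clear the flag */
	}
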
diff --combined fs/ncpfs/dir.c
index 72cfaa253a8f1e744f05ed8001bfdb8372e244bb,b5ec1d980dc933bfe0434201d7350def1371b6d1..0c57c5c5d40a1ce21c6537162d21ed0ad9b9867a
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   *  dir.c
   *
@@@ -119,6 -120,10 +120,6 @@@ static inline int ncp_case_sensitive(co
  /*
   * Note: leave the hash unchanged if the directory
   * is case-sensitive.
 - *
 - * Accessing the parent inode can be racy under RCU pathwalking.
 - * Use ACCESS_ONCE() to make sure we use _one_ particular inode,
 - * the callers will handle races.
   */
  static int 
  ncp_hash_dentry(const struct dentry *dentry, struct qstr *this)
        return 0;
  }
  
 -/*
 - * Accessing the parent inode can be racy under RCU pathwalking.
 - * Use ACCESS_ONCE() to make sure we use _one_ particular inode,
 - * the callers will handle races.
 - */
  static int
  ncp_compare_dentry(const struct dentry *dentry,
                unsigned int len, const char *str, const struct qstr *name)
diff --combined fs/overlayfs/readdir.c
index c67a7703296b924b04e42e2ae1240f9a2cb055f6,698b74dd750ee6a9fb2586d0f8d42853111e6bd1..c310e3ff7f3f7d55979d60ef776c0740243f39b6
@@@ -754,7 -754,7 +754,7 @@@ static int ovl_dir_fsync(struct file *f
        if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
                struct inode *inode = file_inode(file);
  
 -              realfile = lockless_dereference(od->upperfile);
 +              realfile = READ_ONCE(od->upperfile);
                if (!realfile) {
                        struct path upperpath;
  
@@@ -1021,13 -1021,12 +1021,12 @@@ int ovl_indexdir_cleanup(struct dentry 
                        break;
                }
                err = ovl_verify_index(index, lowerstack, numlower);
-               if (err) {
-                       if (err == -EROFS)
-                               break;
+               /* Cleanup stale and orphan index entries */
+               if (err && (err == -ESTALE || err == -ENOENT))
                        err = ovl_cleanup(dir, index);
-                       if (err)
-                               break;
-               }
+               if (err)
+                       break;
                dput(index);
                index = NULL;
        }
diff --combined fs/proc/array.c
index 375e8bf0dd24c5c72fcd2ed5f1ca711fc6427108,9390032a11e13d559b0fbc9accbee25bdff38b30..d82549e804025bc59621b16e3d65e95462a1a9c9
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   *  linux/fs/proc/array.c
   *
@@@ -453,7 -454,7 +454,7 @@@ static int do_task_stat(struct seq_fil
                cutime = sig->cutime;
                cstime = sig->cstime;
                cgtime = sig->cgtime;
 -              rsslim = ACCESS_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
 +              rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
  
                /* add up live thread stats at the group level */
                if (whole) {
diff --combined fs/proc_namespace.c
index 03afd51509162feaed7fd9d3eae196acd9f08522,7626ee11b06c67edac5d9c021516ff6ea3390b98..7b635d17321377e4868554a6ad338a1bd413b3cc
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * fs/proc_namespace.c - handling of /proc/<pid>/{mounts,mountinfo,mountstats}
   *
@@@ -27,7 -28,7 +28,7 @@@ static unsigned mounts_poll(struct fil
  
        poll_wait(file, &p->ns->poll, wait);
  
 -      event = ACCESS_ONCE(ns->event);
 +      event = READ_ONCE(ns->event);
        if (m->poll_event != event) {
                m->poll_event = event;
                res |= POLLERR | POLLPRI;
diff --combined fs/readdir.c
index 7c584bbb4ce3ccf4252a4c572478ab5e5c735a11,d336db65a33eaa4124a741e89fbbbee6b849850f..1b83b0ad183b656b06597dc65ab7d42883cc116e
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   *  linux/fs/readdir.c
   *
@@@ -36,12 -37,13 +37,12 @@@ int iterate_dir(struct file *file, stru
        if (res)
                goto out;
  
 -      if (shared) {
 -              inode_lock_shared(inode);
 -      } else {
 +      if (shared)
 +              res = down_read_killable(&inode->i_rwsem);
 +      else
                res = down_write_killable(&inode->i_rwsem);
 -              if (res)
 -                      goto out;
 -      }
 +      if (res)
 +              goto out;
  
        res = -ENOENT;
        if (!IS_DEADDIR(inode)) {
diff --combined include/asm-generic/atomic-long.h
index f2d97b782031e37d2875c4b8ff91ee0e905ce6dc,49be4bba1e9641de9713ebc06532e581f934e40b..34a028a7bcc53c095a2028f2d0cee89cd3835292
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_GENERIC_ATOMIC_LONG_H
  #define _ASM_GENERIC_ATOMIC_LONG_H
  /*
@@@ -243,7 -244,4 +244,7 @@@ static inline long atomic_long_add_unle
  #define atomic_long_inc_not_zero(l) \
        ATOMIC_LONG_PFX(_inc_not_zero)((ATOMIC_LONG_PFX(_t) *)(l))
  
 +#define atomic_long_cond_read_acquire(v, c) \
 +      ATOMIC_LONG_PFX(_cond_read_acquire)((ATOMIC_LONG_PFX(_t) *)(v), (c))
 +
  #endif  /*  _ASM_GENERIC_ATOMIC_LONG_H  */
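
atomic_long_cond_read_acquire() forwards to atomic_cond_read_acquire(), defined later in this merge as smp_cond_load_acquire() on ->counter: busy-wait until a condition on the freshly loaded value holds, then provide acquire ordering. Inside the condition expression, VAL names each loaded value. A sketch with an assumed lock layout in the low byte:

	#include <linux/atomic.h>

	static atomic_long_t state = ATOMIC_LONG_INIT(0);

	static void wait_for_unlock(void)
	{
		/* spin until the (hypothetical) lock byte clears; the
		 * successful load then acts as an acquire barrier */
		atomic_long_cond_read_acquire(&state, !(VAL & 0xffUL));
	}
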
diff --combined include/asm-generic/qrwlock_types.h
index 8af752acbdc0849c5fe1bb74a82f4993a736a0b6,d93573eff16294aa1a9efa0a79a1f428ddd08c23..137ecdd16daa01c9ced213a9ee38de7b9f16181a
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __ASM_GENERIC_QRWLOCK_TYPES_H
  #define __ASM_GENERIC_QRWLOCK_TYPES_H
  
   */
  
  typedef struct qrwlock {
 -      atomic_t                cnts;
 +      union {
 +              atomic_t cnts;
 +              struct {
 +#ifdef __LITTLE_ENDIAN
 +                      u8 wlocked;     /* Locked for write? */
 +                      u8 __lstate[3];
 +#else
 +                      u8 __lstate[3];
 +                      u8 wlocked;     /* Locked for write? */
 +#endif
 +              };
 +      };
        arch_spinlock_t         wait_lock;
  } arch_rwlock_t;
  
  #define       __ARCH_RW_LOCK_UNLOCKED {               \
 -      .cnts = ATOMIC_INIT(0),                 \
 +      { .cnts = ATOMIC_INIT(0), },            \
        .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
  }
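
Splitting cnts into addressable bytes lets slow paths touch only the writer byte instead of performing a full-word atomic; the endian-dependent field order keeps wlocked on the least significant byte either way. A sketch of what this enables (the example_ name is mine; the companion qrwlock patch does effectively this):

	static inline void example_queued_write_unlock(struct qrwlock *lock)
	{
		/* release just the writer byte; reader count bits untouched */
		smp_store_release(&lock->wlocked, 0);
	}
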
  
diff --combined include/asm-generic/rwsem.h
index b2d68d2d198e794e8c2aeb1128c78788fa5de68a,bdbe43bac2307f5cdd617ebad8c0ae5d0842a37d..93e67a055a4d85ca82436f71ed160d971e33e46e
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_GENERIC_RWSEM_H
  #define _ASM_GENERIC_RWSEM_H
  
@@@ -37,16 -38,6 +38,16 @@@ static inline void __down_read(struct r
                rwsem_down_read_failed(sem);
  }
  
 +static inline int __down_read_killable(struct rw_semaphore *sem)
 +{
 +      if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0)) {
 +              if (IS_ERR(rwsem_down_read_failed_killable(sem)))
 +                      return -EINTR;
 +      }
 +
 +      return 0;
 +}
 +
  static inline int __down_read_trylock(struct rw_semaphore *sem)
  {
        long tmp;
diff --combined include/linux/atomic.h
index 0aeb2b3f45789a8a8f0045808e31da75a8757876,cd18203d6ff3278e477e24001fe2af058c5b00b0..8b276fd9a127317e5ff0d83082eecc9cfa68dd38
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /* Atomic operations usable in machine independent code */
  #ifndef _LINUX_ATOMIC_H
  #define _LINUX_ATOMIC_H
@@@ -653,8 -654,6 +654,8 @@@ static inline int atomic_dec_if_positiv
  }
  #endif
  
 +#define atomic_cond_read_acquire(v, c)        smp_cond_load_acquire(&(v)->counter, (c))
 +
  #ifdef CONFIG_GENERIC_ATOMIC64
  #include <asm-generic/atomic64.h>
  #endif
@@@ -1074,8 -1073,6 +1075,8 @@@ static inline long long atomic64_fetch_
  }
  #endif
  
 +#define atomic64_cond_read_acquire(v, c)      smp_cond_load_acquire(&(v)->counter, (c))
 +
  #include <asm-generic/atomic-long.h>
  
  #endif /* _LINUX_ATOMIC_H */
diff --combined include/linux/average.h
index 3f462292269cc388ccb94eff3fb8d0fe459ffa0a,1b6f5560c264d0fd0557800e1e66ba4722c4b758..a1a8f09631ce0de1652a944dd24fd754b906f40f
@@@ -1,10 -1,7 +1,11 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_AVERAGE_H
  #define _LINUX_AVERAGE_H
  
 +#include <linux/bug.h>
 +#include <linux/compiler.h>
 +#include <linux/log2.h>
 +
  /*
   * Exponentially weighted moving average (EWMA)
   *
@@@ -52,7 -49,7 +53,7 @@@
        static inline void ewma_##name##_add(struct ewma_##name *e,     \
                                             unsigned long val)         \
        {                                                               \
 -              unsigned long internal = ACCESS_ONCE(e->internal);      \
 +              unsigned long internal = READ_ONCE(e->internal);        \
                unsigned long weight_rcp = ilog2(_weight_rcp);          \
                unsigned long precision = _precision;                   \
                                                                        \
                BUILD_BUG_ON((_precision) > 30);                        \
                BUILD_BUG_ON_NOT_POWER_OF_2(_weight_rcp);               \
                                                                        \
 -              ACCESS_ONCE(e->internal) = internal ?                   \
 +              WRITE_ONCE(e->internal, internal ?                      \
                        (((internal << weight_rcp) - internal) +        \
                                (val << precision)) >> weight_rcp :     \
 -                      (val << precision)                            \
 +                      (val << precision));                            \
        }
  
  #endif /* _LINUX_AVERAGE_H */
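
Usage sketch for the DECLARE_EWMA() machinery being converted above (the names are mine): 8 fractional bits of precision and a weight reciprocal of 16, so each sample contributes 1/16 of the new average.

	#include <linux/average.h>
	#include <linux/printk.h>

	DECLARE_EWMA(rssi, 8, 16)

	static struct ewma_rssi avg_rssi;

	static void rssi_setup(void)
	{
		ewma_rssi_init(&avg_rssi);
	}

	static void rssi_sample(unsigned long val)
	{
		ewma_rssi_add(&avg_rssi, val);
		pr_info("avg rssi: %lu\n", ewma_rssi_read(&avg_rssi));
	}
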
diff --combined include/linux/bitops.h
index 0a7ce668f8e0e6c9cae4283f4668f447b7127d7b,d03c5dd6185daafd871e8ad8e685fa1c0af5cd8e..c537ac7435ad147e44165b7cceedde925677c50b
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_BITOPS_H
  #define _LINUX_BITOPS_H
  #include <asm/types.h>
@@@ -236,7 -237,7 +237,7 @@@ static inline unsigned long __ffs64(u6
        typeof(*ptr) old, new;                                  \
                                                                \
        do {                                                    \
 -              old = ACCESS_ONCE(*ptr);                        \
 +              old = READ_ONCE(*ptr);                  \
                new = (old & ~mask) | bits;                     \
        } while (cmpxchg(ptr, old, new) != old);                \
                                                                \
        typeof(*ptr) old, new;                                  \
                                                                \
        do {                                                    \
 -              old = ACCESS_ONCE(*ptr);                        \
 +              old = READ_ONCE(*ptr);                  \
                new = old & ~clear;                             \
        } while (!(old & test) &&                               \
                 cmpxchg(ptr, old, new) != old);                \
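
Both macros in this hunk are lock-free read-modify-write loops: snapshot the word with READ_ONCE(), compute the new value, and retry until cmpxchg() installs it without interference. The same loop written out as a plain function (set_flags() is illustrative):

	#include <linux/atomic.h>

	static void set_flags(unsigned long *word, unsigned long bits)
	{
		unsigned long old, new;

		do {
			old = READ_ONCE(*word);
			new = old | bits;
		} while (cmpxchg(word, old, new) != old);
	}
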
diff --combined include/linux/compiler-clang.h
index 5947a3e6c0e6df82c9946ded18b062bc29e1da12,54dfef70a072744981e77f2621f8c01309c38970..a06583e41f80520a1425e4231d4e201b8d0a12a7
@@@ -1,4 -1,5 +1,5 @@@
 -#ifndef __LINUX_COMPILER_H
+ /* SPDX-License-Identifier: GPL-2.0 */
 +#ifndef __LINUX_COMPILER_TYPES_H
  #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
  #endif
  
diff --combined include/linux/compiler-gcc.h
index ce8e965646efbf9aa12aec4a6e3790737d983924,bb78e5bdff26334376eb54a4b142845bbbaf8c46..2272ded07496d6044ebe43e930047237bb5f254e
@@@ -1,4 -1,5 +1,5 @@@
 -#ifndef __LINUX_COMPILER_H
+ /* SPDX-License-Identifier: GPL-2.0 */
 +#ifndef __LINUX_COMPILER_TYPES_H
  #error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
  #endif
  
diff --combined include/linux/compiler-intel.h
index e438ac89c69270dde162eb1c2f0e0cbe23c7f12f,523d1b74550f20e39c86f3042982185e3dfeb7bd..bfa08160db3a46c5bb798b63650717815d08158b
@@@ -1,4 -1,5 +1,5 @@@
 -#ifndef __LINUX_COMPILER_H
+ /* SPDX-License-Identifier: GPL-2.0 */
 +#ifndef __LINUX_COMPILER_TYPES_H
  #error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead."
  #endif
  
diff --combined include/linux/compiler.h
index 5a1cab48442c42176e7d5b982875b512e47893de,202710420d6deba1bba8107616359ae069ffd5e7..3672353a0acda884be51fd3debba26ea50f43b09
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __LINUX_COMPILER_H
  #define __LINUX_COMPILER_H
  
 -#ifndef __ASSEMBLY__
 +#include <linux/compiler_types.h>
  
 -#ifdef __CHECKER__
 -# define __user               __attribute__((noderef, address_space(1)))
 -# define __kernel     __attribute__((address_space(0)))
 -# define __safe               __attribute__((safe))
 -# define __force      __attribute__((force))
 -# define __nocast     __attribute__((nocast))
 -# define __iomem      __attribute__((noderef, address_space(2)))
 -# define __must_hold(x)       __attribute__((context(x,1,1)))
 -# define __acquires(x)        __attribute__((context(x,0,1)))
 -# define __releases(x)        __attribute__((context(x,1,0)))
 -# define __acquire(x) __context__(x,1)
 -# define __release(x) __context__(x,-1)
 -# define __cond_lock(x,c)     ((c) ? ({ __acquire(x); 1; }) : 0)
 -# define __percpu     __attribute__((noderef, address_space(3)))
 -# define __rcu                __attribute__((noderef, address_space(4)))
 -# define __private    __attribute__((noderef))
 -extern void __chk_user_ptr(const volatile void __user *);
 -extern void __chk_io_ptr(const volatile void __iomem *);
 -# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member))
 -#else /* __CHECKER__ */
 -# ifdef STRUCTLEAK_PLUGIN
 -#  define __user __attribute__((user))
 -# else
 -#  define __user
 -# endif
 -# define __kernel
 -# define __safe
 -# define __force
 -# define __nocast
 -# define __iomem
 -# define __chk_user_ptr(x) (void)0
 -# define __chk_io_ptr(x) (void)0
 -# define __builtin_warning(x, y...) (1)
 -# define __must_hold(x)
 -# define __acquires(x)
 -# define __releases(x)
 -# define __acquire(x) (void)0
 -# define __release(x) (void)0
 -# define __cond_lock(x,c) (c)
 -# define __percpu
 -# define __rcu
 -# define __private
 -# define ACCESS_PRIVATE(p, member) ((p)->member)
 -#endif /* __CHECKER__ */
 -
 -/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
 -#define ___PASTE(a,b) a##b
 -#define __PASTE(a,b) ___PASTE(a,b)
 +#ifndef __ASSEMBLY__
  
  #ifdef __KERNEL__
  
 -#ifdef __GNUC__
 -#include <linux/compiler-gcc.h>
 -#endif
 -
 -#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
 -#define notrace __attribute__((hotpatch(0,0)))
 -#else
 -#define notrace __attribute__((no_instrument_function))
 -#endif
 -
 -/* Intel compiler defines __GNUC__. So we will overwrite implementations
 - * coming from above header files here
 - */
 -#ifdef __INTEL_COMPILER
 -# include <linux/compiler-intel.h>
 -#endif
 -
 -/* Clang compiler defines __GNUC__. So we will overwrite implementations
 - * coming from above header files here
 - */
 -#ifdef __clang__
 -#include <linux/compiler-clang.h>
 -#endif
 -
 -/*
 - * Generic compiler-dependent macros required for kernel
 - * build go below this comment. Actual compiler/compiler version
 - * specific implementations come from the above header files
 - */
 -
 -struct ftrace_branch_data {
 -      const char *func;
 -      const char *file;
 -      unsigned line;
 -      union {
 -              struct {
 -                      unsigned long correct;
 -                      unsigned long incorrect;
 -              };
 -              struct {
 -                      unsigned long miss;
 -                      unsigned long hit;
 -              };
 -              unsigned long miss_hit[2];
 -      };
 -};
 -
 -struct ftrace_likely_data {
 -      struct ftrace_branch_data       data;
 -      unsigned long                   constant;
 -};
 -
  /*
   * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code
   * to disable branch tracing on a per file basis.
@@@ -91,13 -191,13 +92,13 @@@ void ftrace_likely_update(struct ftrace
        asm("%c0:\n\t"                                                  \
            ".pushsection .discard.reachable\n\t"                       \
            ".long %c0b - .\n\t"                                        \
-           ".popsection\n\t" : : "i" (__LINE__));                      \
+           ".popsection\n\t" : : "i" (__COUNTER__));                   \
  })
  #define annotate_unreachable() ({                                     \
        asm("%c0:\n\t"                                                  \
            ".pushsection .discard.unreachable\n\t"                     \
            ".long %c0b - .\n\t"                                        \
-           ".popsection\n\t" : : "i" (__LINE__));                      \
+           ".popsection\n\t" : : "i" (__COUNTER__));                   \
  })
  #define ASM_UNREACHABLE                                                       \
        "999:\n\t"                                                      \
@@@ -233,7 -333,6 +234,7 @@@ static __always_inline void __write_onc
   * with an explicit memory barrier or atomic instruction that provides the
   * required ordering.
   */
 +#include <asm/barrier.h>
  
  #define __READ_ONCE(x, check)                                         \
  ({                                                                    \
                __read_once_size(&(x), __u.__c, sizeof(x));             \
        else                                                            \
                __read_once_size_nocheck(&(x), __u.__c, sizeof(x));     \
 +      smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \
        __u.__val;                                                      \
  })
  #define READ_ONCE(x) __READ_ONCE(x, 1)
  
  #endif /* __ASSEMBLY__ */
  
 -#ifdef __KERNEL__
 -/*
 - * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 - * warning for each use, in hopes of speeding the functions removal.
 - * Usage is:
 - *            int __deprecated foo(void)
 - */
 -#ifndef __deprecated
 -# define __deprecated         /* unimplemented */
 -#endif
 -
 -#ifdef MODULE
 -#define __deprecated_for_modules __deprecated
 -#else
 -#define __deprecated_for_modules
 -#endif
 -
 -#ifndef __must_check
 -#define __must_check
 -#endif
 -
 -#ifndef CONFIG_ENABLE_MUST_CHECK
 -#undef __must_check
 -#define __must_check
 -#endif
 -#ifndef CONFIG_ENABLE_WARN_DEPRECATED
 -#undef __deprecated
 -#undef __deprecated_for_modules
 -#define __deprecated
 -#define __deprecated_for_modules
 -#endif
 -
 -#ifndef __malloc
 -#define __malloc
 -#endif
 -
 -/*
 - * Allow us to avoid 'defined but not used' warnings on functions and data,
 - * as well as force them to be emitted to the assembly file.
 - *
 - * As of gcc 3.4, static functions that are not marked with attribute((used))
 - * may be elided from the assembly file.  As of gcc 3.4, static data not so
 - * marked will not be elided, but this may change in a future gcc version.
 - *
 - * NOTE: Because distributions shipped with a backported unit-at-a-time
 - * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 - * for gcc >=3.3 instead of 3.4.
 - *
 - * In prior versions of gcc, such functions and data would be emitted, but
 - * would be warned about except with attribute((unused)).
 - *
 - * Mark functions that are referenced only in inline assembly as __used so
 - * the code is emitted even though it appears to be unreferenced.
 - */
 -#ifndef __used
 -# define __used                       /* unimplemented */
 -#endif
 -
 -#ifndef __maybe_unused
 -# define __maybe_unused               /* unimplemented */
 -#endif
 -
 -#ifndef __always_unused
 -# define __always_unused      /* unimplemented */
 -#endif
 -
 -#ifndef noinline
 -#define noinline
 -#endif
 -
 -/*
 - * Rather then using noinline to prevent stack consumption, use
 - * noinline_for_stack instead.  For documentation reasons.
 - */
 -#define noinline_for_stack noinline
 -
 -#ifndef __always_inline
 -#define __always_inline inline
 -#endif
 -
 -#endif /* __KERNEL__ */
 -
 -/*
 - * From the GCC manual:
 - *
 - * Many functions do not examine any values except their arguments,
 - * and have no effects except the return value.  Basically this is
 - * just slightly more strict class than the `pure' attribute above,
 - * since function is not allowed to read global memory.
 - *
 - * Note that a function that has pointer arguments and examines the
 - * data pointed to must _not_ be declared `const'.  Likewise, a
 - * function that calls a non-`const' function usually must not be
 - * `const'.  It does not make sense for a `const' function to return
 - * `void'.
 - */
 -#ifndef __attribute_const__
 -# define __attribute_const__  /* unimplemented */
 -#endif
 -
 -#ifndef __designated_init
 -# define __designated_init
 -#endif
 -
 -#ifndef __latent_entropy
 -# define __latent_entropy
 -#endif
 -
 -#ifndef __randomize_layout
 -# define __randomize_layout __designated_init
 -#endif
 -
 -#ifndef __no_randomize_layout
 -# define __no_randomize_layout
 -#endif
 -
 -#ifndef randomized_struct_fields_start
 -# define randomized_struct_fields_start
 -# define randomized_struct_fields_end
 -#endif
 -
 -/*
 - * Tell gcc if a function is cold. The compiler will assume any path
 - * directly leading to the call is unlikely.
 - */
 -
 -#ifndef __cold
 -#define __cold
 -#endif
 -
 -/* Simple shorthand for a section definition */
 -#ifndef __section
 -# define __section(S) __attribute__ ((__section__(#S)))
 -#endif
 -
 -#ifndef __visible
 -#define __visible
 -#endif
 -
 -#ifndef __nostackprotector
 -# define __nostackprotector
 -#endif
 -
 -/*
 - * Assume alignment of return value.
 - */
 -#ifndef __assume_aligned
 -#define __assume_aligned(a, ...)
 -#endif
 -
 -
 -/* Are two types/vars the same type (ignoring qualifiers)? */
 -#ifndef __same_type
 -# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 -#endif
 -
 -/* Is this type a native word size -- useful for atomic operations */
 -#ifndef __native_word
 -# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
 -#endif
 -
  /* Compile time object size, -1 for unknown */
  #ifndef __compiletime_object_size
  # define __compiletime_object_size(obj) -1
        (volatile typeof(x) *)&(x); })
  #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
  
 -/**
 - * lockless_dereference() - safely load a pointer for later dereference
 - * @p: The pointer to load
 - *
 - * Similar to rcu_dereference(), but for situations where the pointed-to
 - * object's lifetime is managed by something other than RCU.  That
 - * "something other" might be reference counting or simple immortality.
 - *
 - * The seemingly unused variable ___typecheck_p validates that @p is
 - * indeed a pointer type by using a pointer to typeof(*p) as the type.
 - * Taking a pointer to typeof(*p) again is needed in case p is void *.
 - */
 -#define lockless_dereference(p) \
 -({ \
 -      typeof(p) _________p1 = READ_ONCE(p); \
 -      typeof(*(p)) *___typecheck_p __maybe_unused; \
 -      smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
 -      (_________p1); \
 -})
 -
  #endif /* __LINUX_COMPILER_H */
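
lockless_dereference() can be deleted because READ_ONCE() now carries the smp_read_barrier_depends() itself (see the __READ_ONCE() hunk above), so every caller converts one-for-one. A sketch of the mechanical conversion:

	#include <linux/compiler.h>

	struct foo;
	static struct foo *shared_ptr;

	static struct foo *grab(void)
	{
		/* old: return lockless_dereference(shared_ptr); */
		return READ_ONCE(shared_ptr);	/* load + dependency ordering */
	}
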
diff --combined include/linux/completion.h
index 4da49916ef3f2f4fcb0db44ad5490cd7cba19360,7828451e161aed19181a3b6cd6e210e6468cb190..0662a417febe34fb9e638857f13a6d52fcaf0d49
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __LINUX_COMPLETION_H
  #define __LINUX_COMPLETION_H
  
@@@ -49,23 -50,15 +50,23 @@@ static inline void complete_release_com
        lock_commit_crosslock((struct lockdep_map *)&x->map);
  }
  
 +#define init_completion_map(x, m)                                     \
 +do {                                                                  \
 +      lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,     \
 +                      (m)->name, (m)->key, 0);                                \
 +      __init_completion(x);                                           \
 +} while (0)
 +
  #define init_completion(x)                                            \
  do {                                                                  \
        static struct lock_class_key __key;                             \
        lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map,     \
 -                      "(complete)" #x,                                \
 +                      "(completion)" #x,                              \
                        &__key, 0);                                     \
        __init_completion(x);                                           \
  } while (0)
  #else
 +#define init_completion_map(x, m) __init_completion(x)
  #define init_completion(x) __init_completion(x)
  static inline void complete_acquire(struct completion *x) {}
  static inline void complete_release(struct completion *x) {}
@@@ -75,15 -68,12 +76,15 @@@ static inline void complete_release_com
  #ifdef CONFIG_LOCKDEP_COMPLETIONS
  #define COMPLETION_INITIALIZER(work) \
        { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
 -      STATIC_CROSS_LOCKDEP_MAP_INIT("(complete)" #work, &(work)) }
 +      STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) }
  #else
  #define COMPLETION_INITIALIZER(work) \
        { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
  #endif
  
 +#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
 +      (*({ init_completion_map(&(work), &(map)); &(work); }))
 +
  #define COMPLETION_INITIALIZER_ONSTACK(work) \
        (*({ init_completion(&work); &work; }))
  
  #ifdef CONFIG_LOCKDEP
  # define DECLARE_COMPLETION_ONSTACK(work) \
        struct completion work = COMPLETION_INITIALIZER_ONSTACK(work)
 +# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) \
 +      struct completion work = COMPLETION_INITIALIZER_ONSTACK_MAP(work, map)
  #else
  # define DECLARE_COMPLETION_ONSTACK(work) DECLARE_COMPLETION(work)
 +# define DECLARE_COMPLETION_ONSTACK_MAP(work, map) DECLARE_COMPLETION(work)
  #endif
  
  /**
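
DECLARE_COMPLETION_ONSTACK_MAP() lets an on-stack completion borrow a caller-supplied lockdep map instead of minting a key per call site, so every wait against the same map is classified as one lock. A hedged sketch (flush_disk_sync() and queue_flush() are invented; disk->lockdep_map is the field added in the genhd.h hunk below):

	static int flush_disk_sync(struct gendisk *disk)
	{
		DECLARE_COMPLETION_ONSTACK_MAP(done, disk->lockdep_map);

		queue_flush(disk, &done);	/* hypothetical submitter */
		wait_for_completion(&done);
		return 0;
	}
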
diff --combined include/linux/dcache.h
index 1d8f5818f6472f2163b9fbbb3851e04bf9397884,f05a659cdf348a0f2efa2f11b1a932a8f3181482..65cd8ab60b7a902bb1f1d36073170fe1040a13dc
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __LINUX_DCACHE_H
  #define __LINUX_DCACHE_H
  
@@@ -519,7 -520,7 +520,7 @@@ static inline struct inode *d_inode(con
  }
  
  /**
 - * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
 + * d_inode_rcu - Get the actual inode of this dentry with READ_ONCE()
   * @dentry: The dentry to query
   *
   * This is the helper normal filesystems should use to get at their own inodes
   */
  static inline struct inode *d_inode_rcu(const struct dentry *dentry)
  {
 -      return ACCESS_ONCE(dentry->d_inode);
 +      return READ_ONCE(dentry->d_inode);
  }
  
  /**
diff --combined include/linux/dynamic_queue_limits.h
index 36dd4ffb57158837dde026a1287d2f04240604d1,34c0a5464c743c57f651c4563e465659a56a4534..023eae69398c4c393ea18f698b4957c7ee7fbaf1
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * Dynamic queue limits (dql) - Definitions
   *
@@@ -88,7 -89,7 +89,7 @@@ static inline void dql_queued(struct dq
  /* Returns how many objects can be queued, < 0 indicates over limit. */
  static inline int dql_avail(const struct dql *dql)
  {
 -      return ACCESS_ONCE(dql->adj_limit) - ACCESS_ONCE(dql->num_queued);
 +      return READ_ONCE(dql->adj_limit) - READ_ONCE(dql->num_queued);
  }
  
  /* Record number of completed objects and recalculate the limit. */
diff --combined include/linux/genetlink.h
index 0e694cf6241444d9bf504b73b0b46cf4ebc5b425,b96dd4e1e6630997cbcea3f58c5dc34f0a1ed674..ecc2928e8046673b1933b8f85d01b83785176f6d
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __LINUX_GENERIC_NETLINK_H
  #define __LINUX_GENERIC_NETLINK_H
  
@@@ -30,7 -31,7 +31,7 @@@ extern wait_queue_head_t genl_sk_destru
   * @p: The pointer to read, prior to dereferencing
   *
   * Return the value of the specified RCU-protected pointer, but omit
 - * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
 + * both the smp_read_barrier_depends() and the READ_ONCE(), because
   * caller holds genl mutex.
   */
  #define genl_dereference(p)                                   \
diff --combined include/linux/genhd.h
index 19d18710546ad96822c899f634e2313ecd104352,44790523057f0380b1254458a20ca658f17a3305..eaefb7a62f83707a9493f664c232f3514d2db880
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_GENHD_H
  #define _LINUX_GENHD_H
  
@@@ -206,7 -207,6 +207,7 @@@ struct gendisk 
  #endif        /* CONFIG_BLK_DEV_INTEGRITY */
        int node_id;
        struct badblocks *bb;
 +      struct lockdep_map lockdep_map;
  };
  
  static inline struct gendisk *part_to_disk(struct hd_struct *part)
@@@ -591,7 -591,8 +592,7 @@@ extern void __delete_partition(struct p
  extern void delete_partition(struct gendisk *, int);
  extern void printk_all_partitions(void);
  
 -extern struct gendisk *alloc_disk_node(int minors, int node_id);
 -extern struct gendisk *alloc_disk(int minors);
 +extern struct gendisk *__alloc_disk_node(int minors, int node_id);
  extern struct kobject *get_disk(struct gendisk *disk);
  extern void put_disk(struct gendisk *disk);
  extern void blk_register_region(dev_t devt, unsigned long range,
@@@ -615,24 -616,6 +616,24 @@@ extern ssize_t part_fail_store(struct d
                               const char *buf, size_t count);
  #endif /* CONFIG_FAIL_MAKE_REQUEST */
  
 +#define alloc_disk_node(minors, node_id)                              \
 +({                                                                    \
 +      static struct lock_class_key __key;                             \
 +      const char *__name;                                             \
 +      struct gendisk *__disk;                                         \
 +                                                                      \
 +      __name = "(gendisk_completion)"#minors"("#node_id")";           \
 +                                                                      \
 +      __disk = __alloc_disk_node(minors, node_id);                    \
 +                                                                      \
 +      if (__disk)                                                     \
 +              lockdep_init_map(&__disk->lockdep_map, __name, &__key, 0); \
 +                                                                      \
 +      __disk;                                                         \
 +})
 +
 +#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
 +
  static inline int hd_ref_init(struct hd_struct *part)
  {
        if (percpu_ref_init(&part->ref, __delete_partition, 0,
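
alloc_disk_node() is rebuilt as a statement-expression macro for one reason: the static struct lock_class_key inside it is instantiated once per call site, so each distinct caller gets its own lockdep class for the disk's completion map. The pattern in miniature (init_thing() and ->map are made up):

	#define init_thing(t)						\
	({								\
		static struct lock_class_key __key;			\
		lockdep_init_map(&(t)->map, "thing:" #t, &__key, 0);	\
	})
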
diff --combined include/linux/huge_mm.h
index 785a00ca46283ba547769dfcf2ec953a52fac693,87067d23a48b898a2d9e60c51ee4a5d2196f6c62..a8a126259bc4c8e8590e5ef29793787de92f8d8c
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_HUGE_MM_H
  #define _LINUX_HUGE_MM_H
  
@@@ -221,7 -222,7 +222,7 @@@ extern struct page *huge_zero_page
  
  static inline bool is_huge_zero_page(struct page *page)
  {
 -      return ACCESS_ONCE(huge_zero_page) == page;
 +      return READ_ONCE(huge_zero_page) == page;
  }
  
  static inline bool is_huge_zero_pmd(pmd_t pmd)
diff --combined include/linux/jump_label.h
index 979a2f2d529bede3a2a440e4afb7d0eed5618698,3b7675bcca645397d664ef24d6ed5449ff9f3fc4..c7b368c734af342da6a3e397c995b6859691f032
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_JUMP_LABEL_H
  #define _LINUX_JUMP_LABEL_H
  
@@@ -81,9 -82,9 +82,9 @@@
  
  extern bool static_key_initialized;
  
 -#define STATIC_KEY_CHECK_USE() WARN(!static_key_initialized,                \
 -                                  "%s used before call to jump_label_init", \
 -                                  __func__)
 +#define STATIC_KEY_CHECK_USE(key) WARN(!static_key_initialized,                     \
 +                                  "%s(): static key '%pS' used before call to jump_label_init()", \
 +                                  __func__, (key))
  
  #ifdef HAVE_JUMP_LABEL
  
@@@ -211,13 -212,13 +212,13 @@@ static __always_inline bool static_key_
  
  static inline void static_key_slow_inc(struct static_key *key)
  {
 -      STATIC_KEY_CHECK_USE();
 +      STATIC_KEY_CHECK_USE(key);
        atomic_inc(&key->enabled);
  }
  
  static inline void static_key_slow_dec(struct static_key *key)
  {
 -      STATIC_KEY_CHECK_USE();
 +      STATIC_KEY_CHECK_USE(key);
        atomic_dec(&key->enabled);
  }
  
@@@ -236,7 -237,7 +237,7 @@@ static inline int jump_label_apply_nops
  
  static inline void static_key_enable(struct static_key *key)
  {
 -      STATIC_KEY_CHECK_USE();
 +      STATIC_KEY_CHECK_USE(key);
  
        if (atomic_read(&key->enabled) != 0) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
  
  static inline void static_key_disable(struct static_key *key)
  {
 -      STATIC_KEY_CHECK_USE();
 +      STATIC_KEY_CHECK_USE(key);
  
        if (atomic_read(&key->enabled) != 1) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
diff --combined include/linux/jump_label_ratelimit.h
index 93086df0a8470848146e9d013a6de1fdb5c94623,fc13ff289903700cb569f1bbfa5912e69b796455..baa8eabbaa56b7ef2c8367ee7a5c407b27cb18c8
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_JUMP_LABEL_RATELIMIT_H
  #define _LINUX_JUMP_LABEL_RATELIMIT_H
  
@@@ -24,18 -25,18 +25,18 @@@ struct static_key_deferred 
  };
  static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
  {
 -      STATIC_KEY_CHECK_USE();
 +      STATIC_KEY_CHECK_USE(key);
        static_key_slow_dec(&key->key);
  }
  static inline void static_key_deferred_flush(struct static_key_deferred *key)
  {
 -      STATIC_KEY_CHECK_USE();
 +      STATIC_KEY_CHECK_USE(key);
  }
  static inline void
  jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
  {
 -      STATIC_KEY_CHECK_USE();
 +      STATIC_KEY_CHECK_USE(key);
  }
  #endif        /* HAVE_JUMP_LABEL */
  #endif        /* _LINUX_JUMP_LABEL_RATELIMIT_H */
diff --combined include/linux/linkage.h
index ebd61b80fed4f16390f7dd1c9dbb9c5ebc879779,2e6f90bd52aa6254a1291abe7082269bdf3003b1..f68db9e450eb3f7ed00e30855a3b183c42d41b7f
@@@ -1,7 -1,8 +1,8 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_LINKAGE_H
  #define _LINUX_LINKAGE_H
  
 -#include <linux/compiler.h>
 +#include <linux/compiler_types.h>
  #include <linux/stringify.h>
  #include <linux/export.h>
  #include <asm/linkage.h>
diff --combined include/linux/lockdep.h
index b6662d05bcdaacb8edced66fdee497fa698f69c3,f301d31b473c789b9f51038b9d9ac2dddace38f5..02720769c159db88a5e45d762d793e627a4e4089
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * Runtime locking correctness validator
   *
@@@ -527,11 -528,6 +528,11 @@@ static inline void lockdep_on(void
   */
  struct lock_class_key { };
  
 +/*
 + * The lockdep_map takes no space if lockdep is disabled:
 + */
 +struct lockdep_map { };
 +
  #define lockdep_depth(tsk)    (0)
  
  #define lockdep_is_held_type(l, r)            (1)
diff --combined include/linux/netfilter/nfnetlink.h
index 0f47a4aa7fc431c541841cc4a4e36cffe44c72f3,414a5e769fde98ba36f0bbe0b90505ffac2445ee..495ba4dd9da5bf419b140d540db6e6cefb30dfd4
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _NFNETLINK_H
  #define _NFNETLINK_H
  
@@@ -66,7 -67,7 +67,7 @@@ static inline bool lockdep_nfnl_is_held
   * @ss: The nfnetlink subsystem ID
   *
   * Return the value of the specified RCU-protected pointer, but omit
 - * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
 + * both the smp_read_barrier_depends() and the READ_ONCE(), because
   * caller holds the NFNL subsystem mutex.
   */
  #define nfnl_dereference(p, ss)                                       \
diff --combined include/linux/rculist.h
index 5ed091c064b21850c139557c8e5e2662354e3791,c2cdd45a880aa00229c0f7c5807305804cfd7823..127f534fec94aadfdc322a562b03f0860d3ccfa3
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_RCULIST_H
  #define _LINUX_RCULIST_H
  
@@@ -274,7 -275,7 +275,7 @@@ static inline void list_splice_tail_ini
   * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
   */
  #define list_entry_rcu(ptr, type, member) \
 -      container_of(lockless_dereference(ptr), type, member)
 +      container_of(READ_ONCE(ptr), type, member)
  
  /*
   * Where are list_empty_rcu() and list_first_entry_rcu()?
   * example is when items are added to the list, but never deleted.
   */
  #define list_entry_lockless(ptr, type, member) \
 -      container_of((typeof(ptr))lockless_dereference(ptr), type, member)
 +      container_of((typeof(ptr))READ_ONCE(ptr), type, member)
  
  /**
   * list_for_each_entry_lockless - iterate over rcu list of given type
diff --combined include/linux/rtnetlink.h
index 765f7b91547559989203fdf4e63849629530aac0,ff3dd2ec44b478eb0a3c42977190588c3c138295..54bcd970bfd3c9586ac2be2d836ebe72f18261bf
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __LINUX_RTNETLINK_H
  #define __LINUX_RTNETLINK_H
  
@@@ -67,7 -68,7 +68,7 @@@ static inline bool lockdep_rtnl_is_held
   * @p: The pointer to read, prior to dereferencing
   *
   * Return the value of the specified RCU-protected pointer, but omit
 - * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because
 + * both the smp_read_barrier_depends() and the READ_ONCE(), because
   * caller holds RTNL.
   */
  #define rtnl_dereference(p)                                   \
diff --combined include/linux/rwsem.h
index 6ac8ee5f15ddf67977c5c32e9d45799ec8920f7a,dfa34d8034399c80eef05c707e36f839510dec92..56707d5ff6adddce20b7ae417d91abfd9c393744
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /* rwsem.h: R/W semaphores, public interface
   *
   * Written by David Howells ([email protected]).
@@@ -111,7 -112,6 +112,7 @@@ static inline int rwsem_is_contended(st
   * lock for reading
   */
  extern void down_read(struct rw_semaphore *sem);
 +extern int __must_check down_read_killable(struct rw_semaphore *sem);
  
  /*
   * trylock for reading -- returns 1 if successful, 0 if contention
diff --combined include/linux/spinlock.h
index 4e202b00dd66f925975ea938ad5343debd17ce77,341e1a12bfc78c99c62e21857c2f9e304800db52..a39186194cd6782ac0b6705f02af6179a86f7813
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __LINUX_SPINLOCK_H
  #define __LINUX_SPINLOCK_H
  
@@@ -165,10 -166,6 +166,10 @@@ static inline void do_raw_spin_lock(raw
        arch_spin_lock(&lock->raw_lock);
  }
  
 +#ifndef arch_spin_lock_flags
 +#define arch_spin_lock_flags(lock, flags)     arch_spin_lock(lock)
 +#endif
 +
  static inline void
  do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
  {
@@@ -282,6 -279,12 +283,6 @@@ static inline void do_raw_spin_unlock(r
        1 : ({ local_irq_restore(flags); 0; }); \
  })
  
 -/**
 - * raw_spin_can_lock - would raw_spin_trylock() succeed?
 - * @lock: the spinlock in question.
 - */
 -#define raw_spin_can_lock(lock)       (!raw_spin_is_locked(lock))
 -
  /* Include rwlock functions */
  #include <linux/rwlock.h>
  
@@@ -394,6 -397,11 +395,6 @@@ static __always_inline int spin_is_cont
        return raw_spin_is_contended(&lock->rlock);
  }
  
 -static __always_inline int spin_can_lock(spinlock_t *lock)
 -{
 -      return raw_spin_can_lock(&lock->rlock);
 -}
 -
  #define assert_spin_locked(lock)      assert_raw_spin_locked(&(lock)->rlock)
  
  /*
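
raw_spin_can_lock()/spin_can_lock() are removed because a can-lock query is inherently racy: the answer can change before the caller acts on it. The atomic replacement is trylock. Sketch (try_do_work()/do_work() invented):

	#include <linux/spinlock.h>

	extern void do_work(void);

	static bool try_do_work(spinlock_t *l)
	{
		if (!spin_trylock(l))
			return false;	/* contended; caller backs off */
		do_work();
		spin_unlock(l);
		return true;
	}
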
diff --combined include/linux/workqueue.h
index c8a572cb49be1e8f237dba11afd519145b7ef65b,0eae11fc7a23fee792e13b4b16e0bf110d491cd5..1cdabfb813abe01c49375810fb19a120230a3740
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * workqueue.h --- work queue handling for Linux.
   */
@@@ -218,7 -219,7 +219,7 @@@ static inline unsigned int work_static(
                                                                        \
                __init_work((_work), _onstack);                         \
                (_work)->data = (atomic_long_t) WORK_DATA_INIT();       \
 -              lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
 +              lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
                INIT_LIST_HEAD(&(_work)->entry);                        \
                (_work)->func = (_func);                                \
        } while (0)
@@@ -398,7 -399,7 +399,7 @@@ __alloc_workqueue_key(const char *fmt, 
        static struct lock_class_key __key;                             \
        const char *__lock_name;                                        \
                                                                        \
 -      __lock_name = #fmt#args;                                        \
 +      __lock_name = "(wq_completion)"#fmt#args;                       \
                                                                        \
        __alloc_workqueue_key((fmt), (flags), (max_active),             \
                              &__key, __lock_name, ##args);             \
diff --combined include/net/ip_vs.h
index 3fadb6f9982b39d4e55183a95f1c8efbf2fc59de,5d08c1950e7d76891ab244958b70c9d8808d92cd..ff68cf288f9bf3a3ac18d696e215980e4fee54f5
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /* IP Virtual Server
   * data structure and functionality definitions
   */
@@@ -983,12 -984,12 +984,12 @@@ static inline int sysctl_sync_threshold
  
  static inline int sysctl_sync_period(struct netns_ipvs *ipvs)
  {
 -      return ACCESS_ONCE(ipvs->sysctl_sync_threshold[1]);
 +      return READ_ONCE(ipvs->sysctl_sync_threshold[1]);
  }
  
  static inline unsigned int sysctl_sync_refresh_period(struct netns_ipvs *ipvs)
  {
 -      return ACCESS_ONCE(ipvs->sysctl_sync_refresh_period);
 +      return READ_ONCE(ipvs->sysctl_sync_refresh_period);
  }
  
  static inline int sysctl_sync_retries(struct netns_ipvs *ipvs)
@@@ -1013,7 -1014,7 +1014,7 @@@ static inline int sysctl_sloppy_sctp(st
  
  static inline int sysctl_sync_ports(struct netns_ipvs *ipvs)
  {
 -      return ACCESS_ONCE(ipvs->sysctl_sync_ports);
 +      return READ_ONCE(ipvs->sysctl_sync_ports);
  }
  
  static inline int sysctl_sync_persist_mode(struct netns_ipvs *ipvs)
diff --combined include/net/netfilter/nf_tables.h
index 5c68e279eaea3214fcfc9719fca9baf580fed23e,079c69cae2f6d723d306dfbde7fa21f71a131ace..470c1c71e7f4443e296f031e92d4743385a4610e
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _NET_NF_TABLES_H
  #define _NET_NF_TABLES_H
  
@@@ -1164,8 -1165,8 +1165,8 @@@ static inline u8 nft_genmask_next(cons
  
  static inline u8 nft_genmask_cur(const struct net *net)
  {
 -      /* Use ACCESS_ONCE() to prevent refetching the value for atomicity */
 -      return 1 << ACCESS_ONCE(net->nft.gencursor);
 +      /* Use READ_ONCE() to prevent refetching the value for atomicity */
 +      return 1 << READ_ONCE(net->nft.gencursor);
  }
  
  #define NFT_GENMASK_ANY               ((1 << 0) | (1 << 1))
diff --combined include/uapi/linux/stddef.h
index d1f7cb732dfcf1b777393424f6d0471b7ffe8760,f65b92e0e1f914fb84b7a4e67a47e5125afb80cc..ee8220f8dcf5f5e16662580cc5c35ac66c322048
@@@ -1,4 -1,5 +1,5 @@@
 -#include <linux/compiler.h>
+ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 +#include <linux/compiler_types.h>
  
  #ifndef __always_inline
  #define __always_inline inline
diff --combined kernel/acct.c
index 21eedd0dd81a12e43c541ec93f86b740b1f710c1,6670fbd3e466d3003ae3c20d5f321b6e0abb2069..d15c0ee4d95504a88337498045a84f53a6ae0d15
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   *  linux/kernel/acct.c
   *
@@@ -146,7 -147,7 +147,7 @@@ static struct bsd_acct_struct *acct_get
  again:
        smp_rmb();
        rcu_read_lock();
 -      res = to_acct(ACCESS_ONCE(ns->bacct));
 +      res = to_acct(READ_ONCE(ns->bacct));
        if (!res) {
                rcu_read_unlock();
                return NULL;
        }
        rcu_read_unlock();
        mutex_lock(&res->lock);
 -      if (res != to_acct(ACCESS_ONCE(ns->bacct))) {
 +      if (res != to_acct(READ_ONCE(ns->bacct))) {
                mutex_unlock(&res->lock);
                acct_put(res);
                goto again;
diff --combined kernel/events/core.c
index 8fd2f2d1358a852e66735ebd34cdf2e3aba84095,10cdb9c26b5d12d41ddacda43e335a9d635c66a7..b315aebbcc3fa2fd197ce875a0e8fccb1b58960e
@@@ -901,9 -901,11 +901,11 @@@ list_update_cgroup_event(struct perf_ev
        cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
        /* cpuctx->cgrp is NULL unless a cgroup event is active in this CPU .*/
        if (add) {
+               struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
                list_add(cpuctx_entry, this_cpu_ptr(&cgrp_cpuctx_list));
-               if (perf_cgroup_from_task(current, ctx) == event->cgrp)
-                       cpuctx->cgrp = event->cgrp;
+               if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
+                       cpuctx->cgrp = cgrp;
        } else {
                list_del(cpuctx_entry);
                cpuctx->cgrp = NULL;
@@@ -1200,7 -1202,7 +1202,7 @@@ perf_event_ctx_lock_nested(struct perf_
  
  again:
        rcu_read_lock();
 -      ctx = ACCESS_ONCE(event->ctx);
 +      ctx = READ_ONCE(event->ctx);
        if (!atomic_inc_not_zero(&ctx->refcount)) {
                rcu_read_unlock();
                goto again;
@@@ -4231,7 -4233,7 +4233,7 @@@ static void perf_remove_from_owner(stru
         * indeed free this event, otherwise we need to serialize on
         * owner->perf_event_mutex.
         */
 -      owner = lockless_dereference(event->owner);
 +      owner = READ_ONCE(event->owner);
        if (owner) {
                /*
                 * Since delayed_put_task_struct() also drops the last
@@@ -4328,7 -4330,7 +4330,7 @@@ again
                 * Cannot change, child events are not migrated, see the
                 * comment with perf_event_ctx_lock_nested().
                 */
 -              ctx = lockless_dereference(child->ctx);
 +              ctx = READ_ONCE(child->ctx);
                /*
                 * Since child_mutex nests inside ctx::mutex, we must jump
                 * through hoops. We start by grabbing a reference on the ctx.
@@@ -5302,8 -5304,8 +5304,8 @@@ static int perf_mmap(struct file *file
                if (!rb)
                        goto aux_unlock;
  
 -              aux_offset = ACCESS_ONCE(rb->user_page->aux_offset);
 -              aux_size = ACCESS_ONCE(rb->user_page->aux_size);
 +              aux_offset = READ_ONCE(rb->user_page->aux_offset);
 +              aux_size = READ_ONCE(rb->user_page->aux_size);
  
                if (aux_offset < perf_data_size(rb) + PAGE_SIZE)
                        goto aux_unlock;
diff --combined kernel/locking/rwsem.c
index e53f7746d9fd0833cd19a01deba2ace13147cd98,a6c76a4832b40d60dacafaa33490c3a30eb77c70..f549c552dbf1e376a412efc1cb7a95a7029f2f7a
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /* kernel/rwsem.c: R/W semaphores, public implementation
   *
   * Written by David Howells ([email protected]).
@@@ -28,22 -29,6 +29,22 @@@ void __sched down_read(struct rw_semaph
  
  EXPORT_SYMBOL(down_read);
  
 +int __sched down_read_killable(struct rw_semaphore *sem)
 +{
 +      might_sleep();
 +      rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
 +
 +      if (LOCK_CONTENDED_RETURN(sem, __down_read_trylock, __down_read_killable)) {
 +              rwsem_release(&sem->dep_map, 1, _RET_IP_);
 +              return -EINTR;
 +      }
 +
 +      rwsem_set_reader_owned(sem);
 +      return 0;
 +}
 +
 +EXPORT_SYMBOL(down_read_killable);
 +
  /*
   * trylock for reading -- returns 1 if successful, 0 if contention
   */
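
down_read_killable() mirrors down_read() but returns -EINTR when a fatal signal arrives during the wait, which is what lets iterate_dir() above take i_rwsem killably. Usage sketch (read_locked_op() is illustrative):

	#include <linux/rwsem.h>

	static int read_locked_op(struct rw_semaphore *sem)
	{
		if (down_read_killable(sem))
			return -EINTR;	/* fatal signal while sleeping */
		/* ... read-side critical section ... */
		up_read(sem);
		return 0;
	}
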
diff --combined kernel/locking/spinlock.c
index 8fd48b5552a79141fce9212fb0a40de15d984313,6e40fdfba326b9e415dbea410eb7cb2166d51e45..b96343374a87297912f2423bc94f95c1011486a8
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * Copyright (2004) Linus Torvalds
   *
@@@ -32,6 -33,8 +33,6 @@@
   * include/linux/spinlock_api_smp.h
   */
  #else
 -#define raw_read_can_lock(l)  read_can_lock(l)
 -#define raw_write_can_lock(l) write_can_lock(l)
  
  /*
   * Some architectures can relax in favour of the CPU owning the lock.
@@@ -66,7 -69,7 +67,7 @@@ void __lockfunc __raw_##op##_lock(lockt
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
 -              while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
 +              while ((lock)->break_lock)                              \
                        arch_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \
@@@ -86,7 -89,7 +87,7 @@@ unsigned long __lockfunc __raw_##op##_l
                                                                        \
                if (!(lock)->break_lock)                                \
                        (lock)->break_lock = 1;                         \
 -              while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
 +              while ((lock)->break_lock)                              \
                        arch_##op##_relax(&lock->raw_lock);             \
        }                                                               \
        (lock)->break_lock = 0;                                         \
diff --combined kernel/seccomp.c
index 8ac79355915b3d544d3ef29ff96dd6b258d597ea,418a1c045933dc57e97de46efe1c71dcc5c879ca..5f0dfb2abb8d39542d72003f860e25eaff66c8f0
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * linux/kernel/seccomp.c
   *
@@@ -189,7 -190,7 +190,7 @@@ static u32 seccomp_run_filters(const st
        u32 ret = SECCOMP_RET_ALLOW;
        /* Make sure cross-thread synced filter points somewhere sane. */
        struct seccomp_filter *f =
 -                      lockless_dereference(current->seccomp.filter);
 +                      READ_ONCE(current->seccomp.filter);
  
        /* Ensure unexpected behavior doesn't result in failing open. */
        if (unlikely(WARN_ON(f == NULL)))
diff --combined kernel/task_work.c
index 9a9f262fc53d097b391fc53c2f1abd46817d8fe8,5718b3ea202a3f5dd7186e6454f18fb6be392644..0fef395662a6ea6f38301e92f74d14f60b3f145d
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  #include <linux/spinlock.h>
  #include <linux/task_work.h>
  #include <linux/tracehook.h>
@@@ -67,7 -68,7 +68,7 @@@ task_work_cancel(struct task_struct *ta
         * we raced with task_work_run(), *pprev == NULL/exited.
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
 -      while ((work = lockless_dereference(*pprev))) {
 +      while ((work = READ_ONCE(*pprev))) {
                if (work->func != func)
                        pprev = &work->next;
                else if (cmpxchg(pprev, work, work->next) == work)
diff --combined kernel/trace/trace.h
index 9050c8b3ccdea9693cdf727b329440aabe6476f9,401b0639116f216c78b3dad0ad663c9e74858d52..6b0b343a36a278be32a89e91e95066e84020c620
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  
  #ifndef _LINUX_KERNEL_TRACE_H
  #define _LINUX_KERNEL_TRACE_H
@@@ -1459,7 -1460,7 +1460,7 @@@ extern struct trace_event_file *find_ev
  
  static inline void *event_file_data(struct file *filp)
  {
 -      return ACCESS_ONCE(file_inode(filp)->i_private);
 +      return READ_ONCE(file_inode(filp)->i_private);
  }
  
  extern struct mutex event_mutex;
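
The rename is not purely cosmetic: ACCESS_ONCE() is a bare volatile cast, which some GCC versions (reportedly 4.6/4.7) silently drop for non-scalar types, whereas READ_ONCE()/WRITE_ONCE() size-switch and fall back to memcpy() for aggregates. A sketch with a hypothetical struct:

	struct snap { u64 seq; void *data; };	/* hypothetical */

	struct snap s, *p = cur_snap;		/* hypothetical pointer */

	s = READ_ONCE(*p);	/* valid for any size; one copy, no refetch */
	/* ACCESS_ONCE(*p) would have relied on a volatile aggregate
	 * access that the affected compilers ignored. */
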
diff --combined kernel/trace/trace_stack.c
index 780262210c9aba54035329fd8be357adcbf2c987,719a52a4064a0f6472ab7e662d00c47b6dd5993d..734accc02418930280a5248013fa6fbd4a5868e2
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * Copyright (C) 2008 Steven Rostedt <[email protected]>
   *
@@@ -77,7 -78,7 +78,7 @@@ check_stack(unsigned long ip, unsigned 
  {
        unsigned long this_size, flags; unsigned long *p, *top, *start;
        static int tracer_frame;
 -      int frame_size = ACCESS_ONCE(tracer_frame);
 +      int frame_size = READ_ONCE(tracer_frame);
        int i, x;
  
        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
diff --combined kernel/workqueue.c
index 160fdc6e839a05421a4517ce55e674e22f6c033a,a2dccfe1acec34bbda97a292344b997055e56d9a..1070b21ba4aa764cde49810130fa8084c690e61e
@@@ -68,6 -68,7 +68,7 @@@ enum 
         * attach_mutex to avoid changing binding state while
         * worker_attach_to_pool() is in progress.
         */
+       POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
  
        /* worker flags */
@@@ -165,7 -166,6 +166,6 @@@ struct worker_pool 
                                                /* L: hash of busy workers */
  
        /* see manage_workers() for details on the two manager mutexes */
-       struct mutex            manager_arb;    /* manager arbitration */
        struct worker           *manager;       /* L: purely informational */
        struct mutex            attach_mutex;   /* attach/detach exclusion */
        struct list_head        workers;        /* A: attached workers */
@@@ -299,6 -299,7 +299,7 @@@ static struct workqueue_attrs *wq_updat
  
  static DEFINE_MUTEX(wq_pool_mutex);   /* protects pools and workqueues list */
  static DEFINE_SPINLOCK(wq_mayday_lock);       /* protects wq->maydays list */
+ static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
  
  static LIST_HEAD(workqueues);         /* PR: list of all workqueues */
  static bool workqueue_freezing;               /* PL: have wqs started freezing? */
@@@ -801,7 -802,7 +802,7 @@@ static bool need_to_create_worker(struc
  /* Do we have too many workers and should some go away? */
  static bool too_many_workers(struct worker_pool *pool)
  {
-       bool managing = mutex_is_locked(&pool->manager_arb);
+       bool managing = pool->flags & POOL_MANAGER_ACTIVE;
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
  
@@@ -1980,24 -1981,17 +1981,17 @@@ static bool manage_workers(struct worke
  {
        struct worker_pool *pool = worker->pool;
  
-       /*
-        * Anyone who successfully grabs manager_arb wins the arbitration
-        * and becomes the manager.  mutex_trylock() on pool->manager_arb
-        * failure while holding pool->lock reliably indicates that someone
-        * else is managing the pool and the worker which failed trylock
-        * can proceed to executing work items.  This means that anyone
-        * grabbing manager_arb is responsible for actually performing
-        * manager duties.  If manager_arb is grabbed and released without
-        * actual management, the pool may stall indefinitely.
-        */
-       if (!mutex_trylock(&pool->manager_arb))
+       if (pool->flags & POOL_MANAGER_ACTIVE)
                return false;
+       pool->flags |= POOL_MANAGER_ACTIVE;
        pool->manager = worker;
  
        maybe_create_worker(pool);
  
        pool->manager = NULL;
-       mutex_unlock(&pool->manager_arb);
+       pool->flags &= ~POOL_MANAGER_ACTIVE;
+       wake_up(&wq_manager_wait);
        return true;
  }
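
The manager_arb mutex existed only to let one worker at a time self-select as manager and to let pool teardown wait for the active manager; since every path involved already holds pool->lock, a flag plus the new wq_manager_wait waitqueue suffices. Condensed, with hypothetical helper names (both sides run under pool->lock):

	static bool try_become_manager(struct worker_pool *pool)
	{
		if (pool->flags & POOL_MANAGER_ACTIVE)
			return false;		/* someone else is managing */
		pool->flags |= POOL_MANAGER_ACTIVE;
		return true;
	}

	static void done_managing(struct worker_pool *pool)
	{
		pool->flags &= ~POOL_MANAGER_ACTIVE;
		wake_up(&wq_manager_wait);	/* put_unbound_pool() may be waiting */
	}
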
  
@@@ -2497,8 -2491,15 +2491,8 @@@ static void insert_wq_barrier(struct po
        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
  
 -      /*
 -       * Explicitly init the crosslock for wq_barrier::done, make its lock
 -       * key a subkey of the corresponding work. As a result we won't
 -       * build a dependency between wq_barrier::done and unrelated work.
 -       */
 -      lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
 -                                 "(complete)wq_barr::done",
 -                                 target->lockdep_map.key, 1);
 -      __init_completion(&barr->done);
 +      init_completion_map(&barr->done, &target->lockdep_map);
 +
        barr->task = current;
  
        /*
@@@ -2604,13 -2605,16 +2598,13 @@@ void flush_workqueue(struct workqueue_s
        struct wq_flusher this_flusher = {
                .list = LIST_HEAD_INIT(this_flusher.list),
                .flush_color = -1,
 -              .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
 +              .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
        };
        int next_color;
  
        if (WARN_ON(!wq_online))
                return;
  
 -      lock_map_acquire(&wq->lockdep_map);
 -      lock_map_release(&wq->lockdep_map);
 -
        mutex_lock(&wq->mutex);
  
        /*
@@@ -2873,6 -2877,9 +2867,6 @@@ bool flush_work(struct work_struct *wor
        if (WARN_ON(!wq_online))
                return false;
  
 -      lock_map_acquire(&work->lockdep_map);
 -      lock_map_release(&work->lockdep_map);
 -
        if (start_flush_work(work, &barr)) {
                wait_for_completion(&barr.done);
                destroy_work_on_stack(&barr.work);
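
The removed lock_map_acquire()/lock_map_release() pairs were the old way of telling lockdep that flushing waits on the work; with completions now able to carry a lockdep map, the dependency is recorded where the wait actually happens. In short (a usage sketch following the hunks above):

	struct wq_barrier barr;

	init_completion_map(&barr.done, &target->lockdep_map);
	/* ... barr.work is queued right behind the target work ... */
	wait_for_completion(&barr.done);	/* lockdep links waiter to the work here */
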
@@@ -3235,7 -3242,6 +3229,6 @@@ static int init_worker_pool(struct work
        setup_timer(&pool->mayday_timer, pool_mayday_timeout,
                    (unsigned long)pool);
  
-       mutex_init(&pool->manager_arb);
        mutex_init(&pool->attach_mutex);
        INIT_LIST_HEAD(&pool->workers);
  
@@@ -3305,13 -3311,15 +3298,15 @@@ static void put_unbound_pool(struct wor
        hash_del(&pool->hash_node);
  
        /*
-        * Become the manager and destroy all workers.  Grabbing
-        * manager_arb prevents @pool's workers from blocking on
-        * attach_mutex.
+        * Become the manager and destroy all workers.  This prevents
+        * @pool's workers from blocking on attach_mutex.  We're the last
+        * manager and @pool gets freed with the flag set.
         */
-       mutex_lock(&pool->manager_arb);
        spin_lock_irq(&pool->lock);
+       wait_event_lock_irq(wq_manager_wait,
+                           !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
+       pool->flags |= POOL_MANAGER_ACTIVE;
        while ((worker = first_idle_worker(pool)))
                destroy_worker(worker);
        WARN_ON(pool->nr_workers || pool->nr_idle);
        if (pool->detach_completion)
                wait_for_completion(pool->detach_completion);
  
-       mutex_unlock(&pool->manager_arb);
        /* shut down the timers */
        del_timer_sync(&pool->idle_timer);
        del_timer_sync(&pool->mayday_timer);
@@@ -4634,7 -4640,7 +4627,7 @@@ static void rebind_workers(struct worke
                 * concurrency management.  Note that when or whether
                 * @worker clears REBOUND doesn't affect correctness.
                 *
 -               * ACCESS_ONCE() is necessary because @worker->flags may be
 +               * WRITE_ONCE() is necessary because @worker->flags may be
                 * tested without holding any lock in
                 * wq_worker_waking_up().  Without it, NOT_RUNNING test may
                 * fail incorrectly leading to premature concurrency
                WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
                worker_flags |= WORKER_REBOUND;
                worker_flags &= ~WORKER_UNBOUND;
 -              ACCESS_ONCE(worker->flags) = worker_flags;
 +              WRITE_ONCE(worker->flags, worker_flags);
        }
  
        spin_unlock_irq(&pool->lock);
diff --combined lib/assoc_array.c
index fe7953aead82f57039de99047fbd00d54282e6a0,4e53be8bc590dc2030a930aec5a2cac8c4fa6a30..b77d51da8c73def55d1aea5ca6c34161652981a0
@@@ -39,7 -39,7 +39,7 @@@ begin_node
                /* Descend through a shortcut */
                shortcut = assoc_array_ptr_to_shortcut(cursor);
                smp_read_barrier_depends();
 -              cursor = ACCESS_ONCE(shortcut->next_node);
 +              cursor = READ_ONCE(shortcut->next_node);
        }
  
        node = assoc_array_ptr_to_node(cursor);
@@@ -55,7 -55,7 +55,7 @@@
         */
        has_meta = 0;
        for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
 -              ptr = ACCESS_ONCE(node->slots[slot]);
 +              ptr = READ_ONCE(node->slots[slot]);
                has_meta |= (unsigned long)ptr;
                if (ptr && assoc_array_ptr_is_leaf(ptr)) {
                        /* We need a barrier between the read of the pointer
@@@ -89,7 -89,7 +89,7 @@@ continue_node
        smp_read_barrier_depends();
  
        for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
 -              ptr = ACCESS_ONCE(node->slots[slot]);
 +              ptr = READ_ONCE(node->slots[slot]);
                if (assoc_array_ptr_is_meta(ptr)) {
                        cursor = ptr;
                        goto begin_node;
@@@ -98,7 -98,7 +98,7 @@@
  
  finished_node:
        /* Move up to the parent (may need to skip back over a shortcut) */
 -      parent = ACCESS_ONCE(node->back_pointer);
 +      parent = READ_ONCE(node->back_pointer);
        slot = node->parent_slot;
        if (parent == stop)
                return 0;
                shortcut = assoc_array_ptr_to_shortcut(parent);
                smp_read_barrier_depends();
                cursor = parent;
 -              parent = ACCESS_ONCE(shortcut->back_pointer);
 +              parent = READ_ONCE(shortcut->back_pointer);
                slot = shortcut->parent_slot;
                if (parent == stop)
                        return 0;
@@@ -147,7 -147,7 +147,7 @@@ int assoc_array_iterate(const struct as
                                        void *iterator_data),
                        void *iterator_data)
  {
 -      struct assoc_array_ptr *root = ACCESS_ONCE(array->root);
 +      struct assoc_array_ptr *root = READ_ONCE(array->root);
  
        if (!root)
                return 0;
@@@ -194,7 -194,7 +194,7 @@@ assoc_array_walk(const struct assoc_arr
  
        pr_devel("-->%s()\n", __func__);
  
 -      cursor = ACCESS_ONCE(array->root);
 +      cursor = READ_ONCE(array->root);
        if (!cursor)
                return assoc_array_walk_tree_empty;
  
@@@ -220,7 -220,7 +220,7 @@@ consider_node
  
        slot = segments >> (level & ASSOC_ARRAY_KEY_CHUNK_MASK);
        slot &= ASSOC_ARRAY_FAN_MASK;
 -      ptr = ACCESS_ONCE(node->slots[slot]);
 +      ptr = READ_ONCE(node->slots[slot]);
  
        pr_devel("consider slot %x [ix=%d type=%lu]\n",
                 slot, level, (unsigned long)ptr & 3);
@@@ -294,7 -294,7 +294,7 @@@ follow_shortcut
        } while (sc_level < shortcut->skip_to_level);
  
        /* The shortcut matches the leaf's index to this point. */
 -      cursor = ACCESS_ONCE(shortcut->next_node);
 +      cursor = READ_ONCE(shortcut->next_node);
        if (((level ^ sc_level) & ~ASSOC_ARRAY_KEY_CHUNK_MASK) != 0) {
                level = sc_level;
                goto jumped;
@@@ -337,7 -337,7 +337,7 @@@ void *assoc_array_find(const struct ass
         * the terminal node.
         */
        for (slot = 0; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
 -              ptr = ACCESS_ONCE(node->slots[slot]);
 +              ptr = READ_ONCE(node->slots[slot]);
                if (ptr && assoc_array_ptr_is_leaf(ptr)) {
                        /* We need a barrier between the read of the pointer
                         * and dereferencing the pointer - but only if we are
@@@ -598,21 -598,31 +598,31 @@@ static bool assoc_array_insert_into_ter
                if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0)
                        goto all_leaves_cluster_together;
  
-               /* Otherwise we can just insert a new node ahead of the old
-                * one.
+               /* Otherwise all the old leaves cluster in the same slot, but
+                * the new leaf wants to go into a different slot - so we
+                * create a new node (n0) to hold the new leaf and a pointer to
+                * a new node (n1) holding all the old leaves.
+                *
+                * This can be done by falling through to the node splitting
+                * path.
                 */
-               goto present_leaves_cluster_but_not_new_leaf;
+               pr_devel("present leaves cluster but not new leaf\n");
        }
  
  split_node:
        pr_devel("split node\n");
  
-       /* We need to split the current node; we know that the node doesn't
-        * simply contain a full set of leaves that cluster together (it
-        * contains meta pointers and/or non-clustering leaves).
+       /* We need to split the current node.  The node must contain anything
+        * from a single leaf (in the one leaf case, this leaf will cluster
+        * with the new leaf) and the rest meta-pointers, to all leaves, some
+        * of which may cluster.
+        *
+        * It won't contain the case in which all the current leaves plus the
+        * new leaves want to cluster in the same slot.
         *
         * We need to expel at least two leaves out of a set consisting of the
-        * leaves in the node and the new leaf.
+        * leaves in the node and the new leaf.  The current meta pointers can
+        * just be copied as they shouldn't cluster with any of the leaves.
         *
         * We need a new node (n0) to replace the current one and a new node to
         * take the expelled nodes (n1).
@@@ -717,33 -727,6 +727,6 @@@ found_slot_for_multiple_occupancy
        pr_devel("<--%s() = ok [split node]\n", __func__);
        return true;
  
- present_leaves_cluster_but_not_new_leaf:
-       /* All the old leaves cluster in the same slot, but the new leaf wants
-        * to go into a different slot, so we create a new node to hold the new
-        * leaf and a pointer to a new node holding all the old leaves.
-        */
-       pr_devel("present leaves cluster but not new leaf\n");
-       new_n0->back_pointer = node->back_pointer;
-       new_n0->parent_slot = node->parent_slot;
-       new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch;
-       new_n1->back_pointer = assoc_array_node_to_ptr(new_n0);
-       new_n1->parent_slot = edit->segment_cache[0];
-       new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch;
-       edit->adjust_count_on = new_n0;
-       for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++)
-               new_n1->slots[i] = node->slots[i];
-       new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0);
-       edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]];
-       edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot];
-       edit->set[0].to = assoc_array_node_to_ptr(new_n0);
-       edit->excised_meta[0] = assoc_array_node_to_ptr(node);
-       pr_devel("<--%s() = ok [insert node before]\n", __func__);
-       return true;
  all_leaves_cluster_together:
        /* All the leaves, new and old, want to cluster together in this node
         * in the same slot, so we have to replace this node with a shortcut to
diff --combined lib/dynamic_queue_limits.c
index 81770a55cb16ebfdba0003479fbdd3eb4a78f46e,6a406fafb5d611771fd6db769efd584719a0ebc6..da4672a50a54a2046bb86479c57dc11552a1981c
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * Dynamic byte queue limits.  See include/linux/dynamic_queue_limits.h
   *
@@@ -20,7 -21,7 +21,7 @@@ void dql_completed(struct dql *dql, uns
        unsigned int ovlimit, completed, num_queued;
        bool all_prev_completed;
  
 -      num_queued = ACCESS_ONCE(dql->num_queued);
 +      num_queued = READ_ONCE(dql->num_queued);
  
        /* Can't complete more than what's in queue */
        BUG_ON(count > num_queued - dql->num_completed);
diff --combined mm/huge_memory.c
index c3bf907a03ee1242f800c4f86e184122e93bfe85,1981ed697dabb530b56c9e6b84d9f569428b101c..b521ed1170f967d37adac445eff57a394c7f0e4a
@@@ -941,6 -941,9 +941,9 @@@ int copy_huge_pmd(struct mm_struct *dst
                                pmd = pmd_swp_mksoft_dirty(pmd);
                        set_pmd_at(src_mm, addr, src_pmd, pmd);
                }
+               add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+               atomic_long_inc(&dst_mm->nr_ptes);
+               pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
                set_pmd_at(dst_mm, addr, dst_pmd, pmd);
                ret = 0;
                goto out_unlock;
@@@ -2715,7 -2718,7 +2718,7 @@@ static unsigned long deferred_split_cou
                struct shrink_control *sc)
  {
        struct pglist_data *pgdata = NODE_DATA(sc->nid);
 -      return ACCESS_ONCE(pgdata->split_queue_len);
 +      return READ_ONCE(pgdata->split_queue_len);
  }
  
  static unsigned long deferred_split_scan(struct shrinker *shrink,
diff --combined mm/slab.h
index 8894f811a89dedf1df25852592bf97340d59c9f5,028cdc7df67ec9e08a683ba27e64b17b19e7612e..86d7c7d860f92c3a46d505a4b327c76e34f5415a
+++ b/mm/slab.h
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef MM_SLAB_H
  #define MM_SLAB_H
  /*
@@@ -258,7 -259,7 +259,7 @@@ cache_from_memcg_idx(struct kmem_cache 
         * memcg_caches issues a write barrier to match this (see
         * memcg_create_kmem_cache()).
         */
 -      cachep = lockless_dereference(arr->entries[idx]);
 +      cachep = READ_ONCE(arr->entries[idx]);
        rcu_read_unlock();
  
        return cachep;
diff --combined net/ipv4/tcp_input.c
index 74480e039005064fe135bacafacc0c1019b85b09,5a87a00641d3a82bfb78d4f3a2959fe8ea2e119c..9a0b3c5ffa46fcaf7176393907bfe3dbc2c3752e
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * INET               An implementation of the TCP/IP protocol suite for the LINUX
   *            operating system.  INET is implemented using the  BSD Socket
@@@ -815,12 -816,12 +816,12 @@@ static void tcp_update_pacing_rate(stru
        if (likely(tp->srtt_us))
                do_div(rate, tp->srtt_us);
  
 -      /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
 +      /* WRITE_ONCE() is needed because sch_fq fetches sk_pacing_rate
         * without any lock. We want to make sure compiler wont store
         * intermediate values in this location.
         */
 -      ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
 -                                              sk->sk_max_pacing_rate);
 +      WRITE_ONCE(sk->sk_pacing_rate, min_t(u64, rate,
 +                                           sk->sk_max_pacing_rate));
  }
  
  /* Calculate rto without backoff.  This is the second half of Van Jacobson's
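
The updated comment's lockless reader lives on the scheduler side (sch_fq); the pairing is the usual one-way publish (a sketch of the pairing, not the exact fq code):

	/* writer, under the socket lock (above) */
	WRITE_ONCE(sk->sk_pacing_rate, rate);

	/* reader, no socket lock held */
	rate = READ_ONCE(sk->sk_pacing_rate);
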
diff --combined net/ipv4/tcp_output.c
index 48531da1aba6275bbdb6008fd1ca1c244ca576e7,478909f4694d00076c96b7a3be1eda62b6be8bef..5a42e873d44a8f880d8999e911d64d6f388865fb
@@@ -739,8 -739,10 +739,10 @@@ static void tcp_tsq_handler(struct soc
                struct tcp_sock *tp = tcp_sk(sk);
  
                if (tp->lost_out > tp->retrans_out &&
-                   tp->snd_cwnd > tcp_packets_in_flight(tp))
+                   tp->snd_cwnd > tcp_packets_in_flight(tp)) {
+                       tcp_mstamp_refresh(tp);
                        tcp_xmit_retransmit_queue(sk);
+               }
  
                tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
                               0, GFP_ATOMIC);
@@@ -1908,7 -1910,7 +1910,7 @@@ static bool tcp_tso_should_defer(struc
        if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
                goto send_now;
  
 -      win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
 +      win_divisor = READ_ONCE(sysctl_tcp_tso_win_divisor);
        if (win_divisor) {
                u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
  
@@@ -2060,6 -2062,7 +2062,7 @@@ static int tcp_mtu_probe(struct sock *s
        nskb->ip_summed = skb->ip_summed;
  
        tcp_insert_write_queue_before(nskb, skb, sk);
+       tcp_highest_sack_replace(sk, skb, nskb);
  
        len = 0;
        tcp_for_write_queue_from_safe(skb, next, sk) {
@@@ -2237,6 -2240,7 +2240,7 @@@ static bool tcp_write_xmit(struct sock 
  
        sent_pkts = 0;
  
+       tcp_mstamp_refresh(tp);
        if (!push_one) {
                /* Do MTU probing. */
                result = tcp_mtu_probe(sk);
        }
  
        max_segs = tcp_tso_segs(sk, mss_now);
-       tcp_mstamp_refresh(tp);
        while ((skb = tcp_send_head(sk))) {
                unsigned int limit;
  
@@@ -2663,7 -2666,7 +2666,7 @@@ static bool tcp_collapse_retrans(struc
                else if (!skb_shift(skb, next_skb, next_skb_size))
                        return false;
        }
-       tcp_highest_sack_combine(sk, next_skb, skb);
+       tcp_highest_sack_replace(sk, next_skb, skb);
  
        tcp_unlink_write_queue(next_skb, sk);
  
@@@ -2841,8 -2844,10 +2844,10 @@@ int __tcp_retransmit_skb(struct sock *s
                nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
                err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
                             -ENOBUFS;
-               if (!err)
+               if (!err) {
                        skb->skb_mstamp = tp->tcp_mstamp;
+                       tcp_rate_skb_sent(sk, skb);
+               }
        } else {
                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
        }
@@@ -3175,13 -3180,8 +3180,8 @@@ struct sk_buff *tcp_make_synack(const s
        th->source = htons(ireq->ir_num);
        th->dest = ireq->ir_rmt_port;
        skb->mark = ireq->ir_mark;
-       /* Setting of flags are superfluous here for callers (and ECE is
-        * not even correctly set)
-        */
-       tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
-                            TCPHDR_SYN | TCPHDR_ACK);
-       th->seq = htonl(TCP_SKB_CB(skb)->seq);
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       th->seq = htonl(tcp_rsk(req)->snt_isn);
        /* XXX data is queued and acked as is. No buffer/window check */
        th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
  
diff --combined net/netfilter/ipvs/ip_vs_sync.c
index 1cfffd42d1e21b19f093897a5c25cbc5fdc07052,13f7408755073b6f43e402975b8450e70ff32873..9ee71cb276d734795e16719347fbf2740ce0cfc4
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * IPVS         An implementation of the IP virtual server support for the
   *              LINUX operating system.  IPVS is now implemented as a module
@@@ -457,7 -458,7 +458,7 @@@ static inline bool in_persistence(struc
  static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs,
                                  struct ip_vs_conn *cp, int pkts)
  {
 -      unsigned long orig = ACCESS_ONCE(cp->sync_endtime);
 +      unsigned long orig = READ_ONCE(cp->sync_endtime);
        unsigned long now = jiffies;
        unsigned long n = (now + cp->timeout) & ~3UL;
        unsigned int sync_refresh_period;
diff --combined scripts/headers_install.sh
index 63b8cc26456adff137bedf4d45859567a4182ba1,4d1ea96e8794c19c99bae029f57377c076ea1f46..a18bca7209957302ebcbb4977f400b375b2ef274
@@@ -1,4 -1,5 +1,5 @@@
  #!/bin/sh
+ # SPDX-License-Identifier: GPL-2.0
  
  if [ $# -lt 2 ]
  then
@@@ -33,7 -34,7 +34,7 @@@ d
        sed -r \
                -e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \
                -e 's/__attribute_const__([ \t]|$)/\1/g' \
 -              -e 's@^#include <linux/compiler.h>@@' \
 +              -e 's@^#include <linux/compiler(|_types).h>@@' \
                -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \
                -e 's/(^|[ \t(])(inline|asm|volatile)([ \t(]|$)/\1__\2__\3/g' \
                -e 's@#(ifndef|define|endif[ \t]*/[*])[ \t]*_UAPI@#\1 @' \
diff --combined sound/firewire/amdtp-stream.h
index f9abd8b07ce6117518dde7ce9d50450a1f8d3954,a608dae8334870ac2d83aae5ef5808a278a87bc9..e45de3eecfe39cb9b3b2f928f9a50307b05a3210
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef SOUND_FIREWIRE_AMDTP_H_INCLUDED
  #define SOUND_FIREWIRE_AMDTP_H_INCLUDED
  
@@@ -220,7 -221,7 +221,7 @@@ static inline bool amdtp_stream_pcm_run
  static inline void amdtp_stream_pcm_trigger(struct amdtp_stream *s,
                                            struct snd_pcm_substream *pcm)
  {
 -      ACCESS_ONCE(s->pcm) = pcm;
 +      WRITE_ONCE(s->pcm, pcm);
  }
  
  static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
diff --combined tools/arch/x86/include/asm/atomic.h
index 96e2d06cb031005595864e5212e7b31b672b5f39,7d8c3261a50d0d0d9ddc9c9c8cb26a632e524c3a..1f5e26aae9fc5fa898d496e3b0a1651627c61e0b
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _TOOLS_LINUX_ASM_X86_ATOMIC_H
  #define _TOOLS_LINUX_ASM_X86_ATOMIC_H
  
@@@ -24,7 -25,7 +25,7 @@@
   */
  static inline int atomic_read(const atomic_t *v)
  {
 -      return ACCESS_ONCE((v)->counter);
 +      return READ_ONCE((v)->counter);
  }
  
  /**
diff --combined tools/include/asm-generic/atomic-gcc.h
index 97427e700e3b359f9218b4bba80a9e6934b83eb3,40b231fb95bd2848c11dba7f8d3a8e3632d9c284..4c1966f7c77a746c898838bc2556675fca21be44
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __TOOLS_ASM_GENERIC_ATOMIC_H
  #define __TOOLS_ASM_GENERIC_ATOMIC_H
  
@@@ -21,7 -22,7 +22,7 @@@
   */
  static inline int atomic_read(const atomic_t *v)
  {
 -      return ACCESS_ONCE((v)->counter);
 +      return READ_ONCE((v)->counter);
  }
  
  /**
diff --combined tools/perf/util/session.h
index aae9645c7122f0fabf46f5629d8e1bf9e755c9d5,41caa098ed15534f7b8100025e9eac533ad24c0e..3f63ee12471d0ce27e8b6d1605ac1c76fa313df3
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef __PERF_SESSION_H
  #define __PERF_SESSION_H
  
@@@ -113,7 -114,7 +114,7 @@@ int __perf_session__set_tracepoints_han
  
  extern volatile int session_done;
  
 -#define session_done()        ACCESS_ONCE(session_done)
 +#define session_done()        READ_ONCE(session_done)
  
  int perf_session__deliver_synth_event(struct perf_session *session,
                                      union perf_event *event,
diff --combined tools/testing/selftests/rcutorture/formal/srcu-cbmc/src/barriers.h
index cc27b9ebcf2004191ef53a640513f3ea01f5711c,be3fdd351937c254fd8fe8f8cd6f16ea8b09e039..3f95a768a03b45c4fc32393297a4ed6af88145d6
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef BARRIERS_H
  #define BARRIERS_H
  
@@@ -34,7 -35,8 +35,7 @@@
  #define rs_smp_mb() do {} while (0)
  #endif
  
 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *) &(x))
 -#define READ_ONCE(x) ACCESS_ONCE(x)
 -#define WRITE_ONCE(x, val) (ACCESS_ONCE(x) = (val))
 +#define READ_ONCE(x) (*(volatile typeof(x) *) &(x))
 +#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))
  
  #endif
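
These one-liners mirror the kernel macros for scalar types; a trivial sketch of their use in a litmus test:

	int flag;

	void writer(void)
	{
		WRITE_ONCE(flag, 1);	/* single volatile store */
	}

	int reader(void)
	{
		return READ_ONCE(flag);	/* single volatile load, no refetch */
	}
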