Merge branch 'linus' into x86/apic, to resolve conflicts
author    Ingo Molnar <[email protected]>
          Tue, 7 Nov 2017 09:51:10 +0000 (10:51 +0100)
committer Ingo Molnar <[email protected]>
          Tue, 7 Nov 2017 09:51:10 +0000 (10:51 +0100)
Conflicts:
arch/x86/include/asm/x2apic.h

Signed-off-by: Ingo Molnar <[email protected]>
31 files changed:
arch/x86/Kconfig
arch/x86/include/asm/desc.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/irq.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/irqdomain.h
arch/x86/include/asm/trace/irq_vectors.h
arch/x86/include/asm/x86_init.h
arch/x86/kernel/apic/Makefile
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/apic_noop.c
arch/x86/kernel/apic/bigsmp_32.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/x2apic_cluster.c
arch/x86/kernel/apic/x2apic_phys.c
arch/x86/kernel/i8259.c
arch/x86/kernel/irqinit.c
arch/x86/kernel/time.c
arch/x86/xen/apic.c
arch/x86/xen/enlighten_pv.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/irqchip/irq-gic-v3-its.c
include/linux/irq.h
include/linux/irqdesc.h
include/linux/irqdomain.h
include/linux/msi.h
kernel/irq/Makefile
kernel/irq/autoprobe.c
kernel/irq/internals.h

diff --combined arch/x86/Kconfig
index ea4bedaba4b8d91fb6580dd459586a90ec5457a5,2fdb23313dd55fa2d08fee3e15c47bde6ef632ac..94802149255a5ec956c4977346ecc17b088f0483
@@@ -1,3 -1,4 +1,4 @@@
+ # SPDX-License-Identifier: GPL-2.0
  # Select 32 or 64 bit
  config 64BIT
        bool "64-bit kernel" if ARCH = "x86"
@@@ -92,10 -93,8 +93,10 @@@ config X8
        select GENERIC_FIND_FIRST_BIT
        select GENERIC_IOMAP
        select GENERIC_IRQ_EFFECTIVE_AFF_MASK   if SMP
 +      select GENERIC_IRQ_MATRIX_ALLOCATOR     if X86_LOCAL_APIC
        select GENERIC_IRQ_MIGRATION            if SMP
        select GENERIC_IRQ_PROBE
 +      select GENERIC_IRQ_RESERVATION_MODE
        select GENERIC_IRQ_SHOW
        select GENERIC_PENDING_IRQ              if SMP
        select GENERIC_SMP_IDLE_THREAD
diff --combined arch/x86/include/asm/desc.h
index c474bf4971d976282c231ea96cae35185205a9f0,0a3e808b91230dbe498769fd18503a2d2960d3bc..4011cb03ef08e52db15f52779ce366c26359a34b
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_DESC_H
  #define _ASM_X86_DESC_H
  
@@@ -392,7 -393,7 +393,7 @@@ static inline void set_desc_limit(struc
  void update_intr_gate(unsigned int n, const void *addr);
  void alloc_intr_gate(unsigned int n, const void *addr);
  
 -extern unsigned long used_vectors[];
 +extern unsigned long system_vectors[];
  
  #ifdef CONFIG_X86_64
  DECLARE_PER_CPU(u32, debug_idt_ctr);
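
For reference, system_vectors[] (the renamed used_vectors[]) remains an unsigned long bitmap indexed by vector number. A minimal sketch of a query against it, assuming the usual kernel bitmap helpers (not part of this commit):

	#include <linux/bitops.h>
	#include <asm/desc.h>

	/* Illustrative only: true if the vector is reserved for a
	 * system interrupt rather than available to devices. */
	static bool example_vector_is_system(unsigned int vector)
	{
		return test_bit(vector, system_vectors);
	}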
diff --combined arch/x86/include/asm/hw_irq.h
index 661540a93072e27680147f63b222d5925aee4547,8ec99a55e6b9d17df05bc02fcff637204c07ffa3..b80e46733909c981aa9125c055d7e1f9f22b2409
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_HW_IRQ_H
  #define _ASM_X86_HW_IRQ_H
  
@@@ -15,8 -16,6 +16,8 @@@
  
  #include <asm/irq_vectors.h>
  
 +#define IRQ_MATRIX_BITS               NR_VECTORS
 +
  #ifndef __ASSEMBLY__
  
  #include <linux/percpu.h>
@@@ -124,13 -123,15 +125,13 @@@ struct irq_alloc_info 
  
  struct irq_cfg {
        unsigned int            dest_apicid;
 -      u8                      vector;
 -      u8                      old_vector;
 +      unsigned int            vector;
  };
  
  extern struct irq_cfg *irq_cfg(unsigned int irq);
  extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
  extern void lock_vector_lock(void);
  extern void unlock_vector_lock(void);
 -extern void setup_vector_irq(int cpu);
  #ifdef CONFIG_SMP
  extern void send_cleanup_vector(struct irq_cfg *);
  extern void irq_complete_move(struct irq_cfg *cfg);
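
struct irq_cfg loses old_vector, since pending moves are now tracked inside the vector domain, and vector widens from u8 to unsigned int. A hedged sketch of a consumer reading the configuration through the accessors declared above (the function name here is hypothetical):

	/* Sketch: inspect where an interrupt is currently steered. */
	static void example_dump_route(struct irq_data *irqd)
	{
		struct irq_cfg *cfg = irqd_cfg(irqd);

		if (cfg)
			pr_debug("vector=%u dest_apicid=0x%x\n",
				 cfg->vector, cfg->dest_apicid);
	}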
diff --combined arch/x86/include/asm/io_apic.h
index 731c686de37c0856585d5ada11938a82c5e20a6e,5c27e146a16696365754c8381722379f399f490e..a8834dd546cdb10ce148262a55cf18653876d7c7
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_IO_APIC_H
  #define _ASM_X86_IO_APIC_H
  
@@@ -192,6 -193,7 +193,6 @@@ static inline unsigned int io_apic_read
  extern void setup_IO_APIC(void);
  extern void enable_IO_APIC(void);
  extern void disable_IO_APIC(void);
 -extern void setup_ioapic_dest(void);
  extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin);
  extern void print_IO_APICs(void);
  #else  /* !CONFIG_X86_IO_APIC */
@@@ -231,6 -233,7 +232,6 @@@ static inline void io_apic_init_mapping
  
  static inline void setup_IO_APIC(void) { }
  static inline void enable_IO_APIC(void) { }
 -static inline void setup_ioapic_dest(void) { }
  
  #endif
  
diff --combined arch/x86/include/asm/irq.h
index 1002a3e8fccc9b484de4fafd0ba6ee357c77be6c,d8632f8fa17d5a1fe52217ded64892c7c6bf72ff..2395bb794c7bb89211abb69a3f6deab2abf4d132
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_IRQ_H
  #define _ASM_X86_IRQ_H
  /*
@@@ -25,7 -26,11 +26,7 @@@ extern void irq_ctx_init(int cpu)
  
  struct irq_desc;
  
 -#ifdef CONFIG_HOTPLUG_CPU
 -#include <linux/cpumask.h>
 -extern int check_irq_vectors_for_cpu_disable(void);
  extern void fixup_irqs(void);
 -#endif
  
  #ifdef CONFIG_HAVE_KVM
  extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
diff --combined arch/x86/include/asm/irq_vectors.h
index 1e9bd28f842d65ab24f96a3fccada745a2b9e0f5,c20ffca8fef1782ec8fdca8cad5e38b57b014120..67421f649cfa18d064a259bbbd78b30ebd44144a
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_IRQ_VECTORS_H
  #define _ASM_X86_IRQ_VECTORS_H
  
  #define POSTED_INTR_NESTED_VECTOR     0xf0
  #endif
  
 -/*
 - * Local APIC timer IRQ vector is on a different priority level,
 - * to work around the 'lost local interrupt if more than 2 IRQ
 - * sources per level' errata.
 - */
 -#define LOCAL_TIMER_VECTOR            0xef
 +#define MANAGED_IRQ_SHUTDOWN_VECTOR   0xef
 +#define LOCAL_TIMER_VECTOR            0xee
  
  #define NR_VECTORS                     256
  
diff --combined arch/x86/include/asm/irqdomain.h
index 73e9c42ce63be1470120e40d80804571d4fd9c8f,423e112c1e8fe3f884e3c0e0f0001234104a305a..f695cc6b8e1f4476263d4477063aec247accde0d
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_IRQDOMAIN_H
  #define _ASM_IRQDOMAIN_H
  
@@@ -8,7 -9,6 +9,7 @@@
  enum {
        /* Allocate contiguous CPU vectors */
        X86_IRQ_ALLOC_CONTIGUOUS_VECTORS                = 0x1,
 +      X86_IRQ_ALLOC_LEGACY                            = 0x2,
  };
  
  extern struct irq_domain *x86_vector_domain;
@@@ -42,8 -42,8 +43,8 @@@ extern int mp_irqdomain_alloc(struct ir
                              unsigned int nr_irqs, void *arg);
  extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
                              unsigned int nr_irqs);
 -extern void mp_irqdomain_activate(struct irq_domain *domain,
 -                                struct irq_data *irq_data);
 +extern int mp_irqdomain_activate(struct irq_domain *domain,
 +                               struct irq_data *irq_data, bool early);
  extern void mp_irqdomain_deactivate(struct irq_domain *domain,
                                    struct irq_data *irq_data);
  extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
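
mp_irqdomain_activate() is converted to the new irqdomain activate signature: it now returns a status and receives an early flag for activations performed before interrupt delivery works. Other domain implementations follow the same pattern; a minimal sketch with a hypothetical callback name:

	/* Sketch, assuming a hypothetical irq domain: */
	static int example_domain_activate(struct irq_domain *domain,
					   struct irq_data *irqd, bool early)
	{
		/*
		 * Program the hardware route; when early is true, defer
		 * anything that needs working interrupt delivery.
		 */
		return 0;	/* or -errno on failure */
	}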
diff --combined arch/x86/include/asm/trace/irq_vectors.h
index bfd480b827f5f6390a4c3d56c1ccd9bb0594c803,8eb139ed1a03bec70322b72502e518a2cc3ccf1e..84b9ec0c1bc0867795c3a2d327f3b8831bf4f8ba
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #undef TRACE_SYSTEM
  #define TRACE_SYSTEM irq_vectors
  
@@@ -137,254 -138,6 +138,254 @@@ DEFINE_IRQ_VECTOR_EVENT(deferred_error_
  DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
  #endif
  
 +TRACE_EVENT(vector_config,
 +
 +      TP_PROTO(unsigned int irq, unsigned int vector,
 +               unsigned int cpu, unsigned int apicdest),
 +
 +      TP_ARGS(irq, vector, cpu, apicdest),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   irq             )
 +              __field(        unsigned int,   vector          )
 +              __field(        unsigned int,   cpu             )
 +              __field(        unsigned int,   apicdest        )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->irq            = irq;
 +              __entry->vector         = vector;
 +              __entry->cpu            = cpu;
 +              __entry->apicdest       = apicdest;
 +      ),
 +
 +      TP_printk("irq=%u vector=%u cpu=%u apicdest=0x%08x",
 +                __entry->irq, __entry->vector, __entry->cpu,
 +                __entry->apicdest)
 +);
 +
 +DECLARE_EVENT_CLASS(vector_mod,
 +
 +      TP_PROTO(unsigned int irq, unsigned int vector,
 +               unsigned int cpu, unsigned int prev_vector,
 +               unsigned int prev_cpu),
 +
 +      TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   irq             )
 +              __field(        unsigned int,   vector          )
 +              __field(        unsigned int,   cpu             )
 +              __field(        unsigned int,   prev_vector     )
 +              __field(        unsigned int,   prev_cpu        )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->irq            = irq;
 +              __entry->vector         = vector;
 +              __entry->cpu            = cpu;
 +              __entry->prev_vector    = prev_vector;
 +              __entry->prev_cpu       = prev_cpu;
 +
 +      ),
 +
 +      TP_printk("irq=%u vector=%u cpu=%u prev_vector=%u prev_cpu=%u",
 +                __entry->irq, __entry->vector, __entry->cpu,
 +                __entry->prev_vector, __entry->prev_cpu)
 +);
 +
 +#define DEFINE_IRQ_VECTOR_MOD_EVENT(name)                             \
 +DEFINE_EVENT_FN(vector_mod, name,                                     \
 +      TP_PROTO(unsigned int irq, unsigned int vector,                 \
 +               unsigned int cpu, unsigned int prev_vector,            \
 +               unsigned int prev_cpu),                                \
 +      TP_ARGS(irq, vector, cpu, prev_vector, prev_cpu), NULL, NULL);  \
 +
 +DEFINE_IRQ_VECTOR_MOD_EVENT(vector_update);
 +DEFINE_IRQ_VECTOR_MOD_EVENT(vector_clear);
 +
 +DECLARE_EVENT_CLASS(vector_reserve,
 +
 +      TP_PROTO(unsigned int irq, int ret),
 +
 +      TP_ARGS(irq, ret),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   irq     )
 +              __field(        int,            ret     )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->irq = irq;
 +              __entry->ret = ret;
 +      ),
 +
 +      TP_printk("irq=%u ret=%d", __entry->irq, __entry->ret)
 +);
 +
 +#define DEFINE_IRQ_VECTOR_RESERVE_EVENT(name) \
 +DEFINE_EVENT_FN(vector_reserve, name, \
 +      TP_PROTO(unsigned int irq, int ret),    \
 +      TP_ARGS(irq, ret), NULL, NULL);         \
 +
 +DEFINE_IRQ_VECTOR_RESERVE_EVENT(vector_reserve_managed);
 +DEFINE_IRQ_VECTOR_RESERVE_EVENT(vector_reserve);
 +
 +TRACE_EVENT(vector_alloc,
 +
 +      TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
 +               int ret),
 +
 +      TP_ARGS(irq, vector, ret, reserved),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   irq             )
 +              __field(        unsigned int,   vector          )
 +              __field(        bool,           reserved        )
 +              __field(        int,            ret             )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->irq            = irq;
 +              __entry->vector         = ret < 0 ? 0 : vector;
 +              __entry->reserved       = reserved;
 +              __entry->ret            = ret > 0 ? 0 : ret;
 +      ),
 +
 +      TP_printk("irq=%u vector=%u reserved=%d ret=%d",
 +                __entry->irq, __entry->vector,
 +                __entry->reserved, __entry->ret)
 +);
 +
 +TRACE_EVENT(vector_alloc_managed,
 +
 +      TP_PROTO(unsigned int irq, unsigned int vector,
 +               int ret),
 +
 +      TP_ARGS(irq, vector, ret),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   irq             )
 +              __field(        unsigned int,   vector          )
 +              __field(        int,            ret             )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->irq            = irq;
 +              __entry->vector         = ret < 0 ? 0 : vector;
 +              __entry->ret            = ret > 0 ? 0 : ret;
 +      ),
 +
 +      TP_printk("irq=%u vector=%u ret=%d",
 +                __entry->irq, __entry->vector, __entry->ret)
 +);
 +
 +DECLARE_EVENT_CLASS(vector_activate,
 +
 +      TP_PROTO(unsigned int irq, bool is_managed, bool can_reserve,
 +               bool early),
 +
 +      TP_ARGS(irq, is_managed, can_reserve, early),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   irq             )
 +              __field(        bool,           is_managed      )
 +              __field(        bool,           can_reserve     )
 +              __field(        bool,           early           )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->irq            = irq;
 +              __entry->is_managed     = is_managed;
 +              __entry->can_reserve    = can_reserve;
 +              __entry->early          = early;
 +      ),
 +
 +      TP_printk("irq=%u is_managed=%d can_reserve=%d early=%d",
 +                __entry->irq, __entry->is_managed, __entry->can_reserve,
 +                __entry->early)
 +);
 +
 +#define DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(name)                                \
 +DEFINE_EVENT_FN(vector_activate, name,                                        \
 +      TP_PROTO(unsigned int irq, bool is_managed,                     \
 +               bool can_reserve, bool early),                         \
 +      TP_ARGS(irq, is_managed, can_reserve, early), NULL, NULL);      \
 +
 +DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_activate);
 +DEFINE_IRQ_VECTOR_ACTIVATE_EVENT(vector_deactivate);
 +
 +TRACE_EVENT(vector_teardown,
 +
 +      TP_PROTO(unsigned int irq, bool is_managed, bool has_reserved),
 +
 +      TP_ARGS(irq, is_managed, has_reserved),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   irq             )
 +              __field(        bool,           is_managed      )
 +              __field(        bool,           has_reserved    )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->irq            = irq;
 +              __entry->is_managed     = is_managed;
 +              __entry->has_reserved   = has_reserved;
 +      ),
 +
 +      TP_printk("irq=%u is_managed=%d has_reserved=%d",
 +                __entry->irq, __entry->is_managed, __entry->has_reserved)
 +);
 +
 +TRACE_EVENT(vector_setup,
 +
 +      TP_PROTO(unsigned int irq, bool is_legacy, int ret),
 +
 +      TP_ARGS(irq, is_legacy, ret),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   irq             )
 +              __field(        bool,           is_legacy       )
 +              __field(        int,            ret             )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->irq            = irq;
 +              __entry->is_legacy      = is_legacy;
 +              __entry->ret            = ret;
 +      ),
 +
 +      TP_printk("irq=%u is_legacy=%d ret=%d",
 +                __entry->irq, __entry->is_legacy, __entry->ret)
 +);
 +
 +TRACE_EVENT(vector_free_moved,
 +
 +      TP_PROTO(unsigned int irq, unsigned int cpu, unsigned int vector,
 +               bool is_managed),
 +
 +      TP_ARGS(irq, cpu, vector, is_managed),
 +
 +      TP_STRUCT__entry(
 +              __field(        unsigned int,   irq             )
 +              __field(        unsigned int,   cpu             )
 +              __field(        unsigned int,   vector          )
 +              __field(        bool,           is_managed      )
 +      ),
 +
 +      TP_fast_assign(
 +              __entry->irq            = irq;
 +              __entry->cpu            = cpu;
 +              __entry->vector         = vector;
 +              __entry->is_managed     = is_managed;
 +      ),
 +
 +      TP_printk("irq=%u cpu=%u vector=%u is_managed=%d",
 +                __entry->irq, __entry->cpu, __entry->vector,
 +                __entry->is_managed)
 +);
 +
 +
  #endif /* CONFIG_X86_LOCAL_APIC */
  
  #undef TRACE_INCLUDE_PATH
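
These events instrument the rewritten vector allocator; the call sites live in arch/x86/kernel/apic/vector.c, outside this excerpt. A hedged sketch of how such a tracepoint is fired, matching the TP_PROTO of vector_config above (the wrapper function is hypothetical):

	#include <asm/trace/irq_vectors.h>

	/* Sketch only: emit a vector_config event after steering
	 * irq to fire on (cpu, vector) with the given APIC dest. */
	static void example_emit(unsigned int irq, unsigned int vector,
				 unsigned int cpu, unsigned int apicdest)
	{
		trace_vector_config(irq, vector, cpu, apicdest);
	}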
diff --combined arch/x86/include/asm/x86_init.h
index f45acdf4595795bda75bdf48e111ba2afacb62a4,8a1ebf9540ddf0822c3f80c2fa09340c339860f7..63d0eb250792a6b98385b744f699c2ef46cdf04a
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _ASM_X86_PLATFORM_H
  #define _ASM_X86_PLATFORM_H
  
@@@ -50,13 -51,11 +51,13 @@@ struct x86_init_resources 
   *                            are set up.
   * @intr_init:                        interrupt init code
   * @trap_init:                        platform specific trap setup
 + * @intr_mode_init:           interrupt delivery mode setup
   */
  struct x86_init_irqs {
        void (*pre_vector_init)(void);
        void (*intr_init)(void);
        void (*trap_init)(void);
 +      void (*intr_mode_init)(void);
  };
  
  /**
diff --combined arch/x86/kernel/apic/Makefile
index bd65ce2e768e361a7e2bf20b10814ea8ff2f2380,2fb7309c6900635aa0891537875647cdbbf0da10..a9e08924927ef6da6f4620942b3ceba654d1578d
@@@ -1,3 -1,4 +1,4 @@@
+ # SPDX-License-Identifier: GPL-2.0
  #
  # Makefile for local APIC drivers and for the IO-APIC code
  #
@@@ -6,7 -7,7 +7,7 @@@
  # In particular, smp_apic_timer_interrupt() is called in random places.
  KCOV_INSTRUMENT               := n
  
 -obj-$(CONFIG_X86_LOCAL_APIC)  += apic.o apic_noop.o ipi.o vector.o
 +obj-$(CONFIG_X86_LOCAL_APIC)  += apic.o apic_common.o apic_noop.o ipi.o vector.o
  obj-y                         += hw_nmi.o
  
  obj-$(CONFIG_X86_IO_APIC)     += io_apic.o
diff --combined arch/x86/kernel/apic/apic.c
index a1ca2c08f5320086613bfbf04367c30d5c1b2471,ff891772c9f86492d7ca2721b66619b2be61ed9d..132bf45c943adf90d7f87d36c145954e289e496a
@@@ -211,7 -211,11 +211,7 @@@ static inline int lapic_get_version(voi
   */
  static inline int lapic_is_integrated(void)
  {
 -#ifdef CONFIG_X86_64
 -      return 1;
 -#else
        return APIC_INTEGRATED(lapic_get_version());
 -#endif
  }
  
  /*
@@@ -294,11 -298,14 +294,11 @@@ int get_physical_broadcast(void
   */
  int lapic_get_maxlvt(void)
  {
 -      unsigned int v;
 -
 -      v = apic_read(APIC_LVR);
        /*
         * - we always have APIC integrated on 64bit mode
         * - 82489DXs do not report # of LVT entries
         */
 -      return APIC_INTEGRATED(GET_APIC_VERSION(v)) ? GET_APIC_MAXLVT(v) : 2;
 +      return lapic_is_integrated() ? GET_APIC_MAXLVT(apic_read(APIC_LVR)) : 2;
  }
  
  /*
@@@ -566,11 -573,21 +566,21 @@@ static u32 bdx_deadline_rev(void
        return ~0U;
  }
  
+ static u32 skx_deadline_rev(void)
+ {
+       switch (boot_cpu_data.x86_mask) {
+       case 0x03: return 0x01000136;
+       case 0x04: return 0x02000014;
+       }
+       return ~0U;
+ }
  static const struct x86_cpu_id deadline_match[] = {
        DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X,        hsx_deadline_rev),
        DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X,      0x0b000020),
        DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev),
-       DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X,        0x02000014),
+       DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X,        skx_deadline_rev),
  
        DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE,     0x22),
        DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT,      0x20),
@@@ -593,7 -610,8 +603,8 @@@ static void apic_check_deadline_errata(
        const struct x86_cpu_id *m;
        u32 rev;
  
-       if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+       if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) ||
+           boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return;
  
        m = x86_match_cpu(deadline_match);
@@@ -1211,100 -1229,53 +1222,100 @@@ void __init sync_Arb_IDs(void
                        APIC_INT_LEVELTRIG | APIC_DM_INIT);
  }
  
 -/*
 - * An initial setup of the virtual wire mode.
 - */
 -void __init init_bsp_APIC(void)
 +enum apic_intr_mode_id apic_intr_mode;
 +
 +static int __init apic_intr_mode_select(void)
  {
 -      unsigned int value;
 +      /* Check kernel option */
 +      if (disable_apic) {
 +              pr_info("APIC disabled via kernel command line\n");
 +              return APIC_PIC;
 +      }
  
 -      /*
 -       * Don't do the setup now if we have a SMP BIOS as the
 -       * through-I/O-APIC virtual wire mode might be active.
 -       */
 -      if (smp_found_config || !boot_cpu_has(X86_FEATURE_APIC))
 -              return;
 +      /* Check BIOS */
 +#ifdef CONFIG_X86_64
 +      /* On 64-bit, the APIC must be integrated; check the local APIC only */
 +      if (!boot_cpu_has(X86_FEATURE_APIC)) {
 +              disable_apic = 1;
 +              pr_info("APIC disabled by BIOS\n");
 +              return APIC_PIC;
 +      }
 +#else
 +      /* On 32-bit, the APIC may be integrated APIC or 82489DX */
  
 -      /*
 -       * Do not trust the local APIC being empty at bootup.
 -       */
 -      clear_local_APIC();
 +      /* Neither 82489DX nor integrated APIC ? */
 +      if (!boot_cpu_has(X86_FEATURE_APIC) && !smp_found_config) {
 +              disable_apic = 1;
 +              return APIC_PIC;
 +      }
  
 -      /*
 -       * Enable APIC.
 -       */
 -      value = apic_read(APIC_SPIV);
 -      value &= ~APIC_VECTOR_MASK;
 -      value |= APIC_SPIV_APIC_ENABLED;
 +      /* Does the BIOS pretend there is an integrated APIC? */
 +      if (!boot_cpu_has(X86_FEATURE_APIC) &&
 +              APIC_INTEGRATED(boot_cpu_apic_version)) {
 +              disable_apic = 1;
 +              pr_err(FW_BUG "Local APIC %d not detected, force emulation\n",
 +                                     boot_cpu_physical_apicid);
 +              return APIC_PIC;
 +      }
 +#endif
  
 -#ifdef CONFIG_X86_32
 -      /* This bit is reserved on P4/Xeon and should be cleared */
 -      if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
 -          (boot_cpu_data.x86 == 15))
 -              value &= ~APIC_SPIV_FOCUS_DISABLED;
 -      else
 +      /* Check MP table or ACPI MADT configuration */
 +      if (!smp_found_config) {
 +              disable_ioapic_support();
 +              if (!acpi_lapic) {
 +                      pr_info("APIC: ACPI MADT or MP tables are not detected\n");
 +                      return APIC_VIRTUAL_WIRE_NO_CONFIG;
 +              }
 +              return APIC_VIRTUAL_WIRE;
 +      }
 +
 +#ifdef CONFIG_SMP
 +      /* If SMP should be disabled, then really disable it! */
 +      if (!setup_max_cpus) {
 +              pr_info("APIC: SMP mode deactivated\n");
 +              return APIC_SYMMETRIC_IO_NO_ROUTING;
 +      }
 +
 +      if (read_apic_id() != boot_cpu_physical_apicid) {
 +              panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
 +                   read_apic_id(), boot_cpu_physical_apicid);
 +              /* Or can we switch back to PIC here? */
 +      }
  #endif
 -              value |= APIC_SPIV_FOCUS_DISABLED;
 -      value |= SPURIOUS_APIC_VECTOR;
 -      apic_write(APIC_SPIV, value);
  
 -      /*
 -       * Set up the virtual wire mode.
 -       */
 -      apic_write(APIC_LVT0, APIC_DM_EXTINT);
 -      value = APIC_DM_NMI;
 -      if (!lapic_is_integrated())             /* 82489DX */
 -              value |= APIC_LVT_LEVEL_TRIGGER;
 -      if (apic_extnmi == APIC_EXTNMI_NONE)
 -              value |= APIC_LVT_MASKED;
 -      apic_write(APIC_LVT1, value);
 +      return APIC_SYMMETRIC_IO;
 +}
 +
 +/* Init the interrupt delivery mode for the BSP */
 +void __init apic_intr_mode_init(void)
 +{
 +      bool upmode = IS_ENABLED(CONFIG_UP_LATE_INIT);
 +
 +      apic_intr_mode = apic_intr_mode_select();
 +
 +      switch (apic_intr_mode) {
 +      case APIC_PIC:
 +              pr_info("APIC: Keep in PIC mode(8259)\n");
 +              return;
 +      case APIC_VIRTUAL_WIRE:
 +              pr_info("APIC: Switch to virtual wire mode setup\n");
 +              default_setup_apic_routing();
 +              break;
 +      case APIC_VIRTUAL_WIRE_NO_CONFIG:
 +              pr_info("APIC: Switch to virtual wire mode setup with no configuration\n");
 +              upmode = true;
 +              default_setup_apic_routing();
 +              break;
 +      case APIC_SYMMETRIC_IO:
 +              pr_info("APIC: Switch to symmetric I/O mode setup\n");
 +              default_setup_apic_routing();
 +              break;
 +      case APIC_SYMMETRIC_IO_NO_ROUTING:
 +              pr_info("APIC: Switch to symmetric I/O mode setup in no SMP routine\n");
 +              break;
 +      }
 +
 +      apic_bsp_setup(upmode);
  }
  
  static void lapic_setup_esr(void)
@@@ -1528,9 -1499,7 +1539,9 @@@ void setup_local_APIC(void
                value = APIC_DM_NMI;
        else
                value = APIC_DM_NMI | APIC_LVT_MASKED;
 -      if (!lapic_is_integrated())             /* 82489DX */
 +
 +      /* Is 82489DX ? */
 +      if (!lapic_is_integrated())
                value |= APIC_LVT_LEVEL_TRIGGER;
        apic_write(APIC_LVT1, value);
  
@@@ -1916,8 -1885,8 +1927,8 @@@ void __init init_apic_mappings(void
                 * yeah -- we lie about apic_version
                 * in case if apic was disabled via boot option
                 * but it's not a problem for SMP compiled kernel
 -               * since smp_sanity_check is prepared for such a case
 -               * and disable smp mode
 +               * since apic_intr_mode_select is prepared for such
 +               * a case and disable smp mode
                 */
                boot_cpu_apic_version = GET_APIC_VERSION(apic_read(APIC_LVR));
        }
@@@ -2273,6 -2242,44 +2284,6 @@@ int hard_smp_processor_id(void
        return read_apic_id();
  }
  
 -void default_init_apic_ldr(void)
 -{
 -      unsigned long val;
 -
 -      apic_write(APIC_DFR, APIC_DFR_VALUE);
 -      val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
 -      val |= SET_APIC_LOGICAL_ID(1UL << smp_processor_id());
 -      apic_write(APIC_LDR, val);
 -}
 -
 -int default_cpu_mask_to_apicid(const struct cpumask *mask,
 -                             struct irq_data *irqdata,
 -                             unsigned int *apicid)
 -{
 -      unsigned int cpu = cpumask_first(mask);
 -
 -      if (cpu >= nr_cpu_ids)
 -              return -EINVAL;
 -      *apicid = per_cpu(x86_cpu_to_apicid, cpu);
 -      irq_data_update_effective_affinity(irqdata, cpumask_of(cpu));
 -      return 0;
 -}
 -
 -int flat_cpu_mask_to_apicid(const struct cpumask *mask,
 -                          struct irq_data *irqdata,
 -                          unsigned int *apicid)
 -
 -{
 -      struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
 -      unsigned long cpu_mask = cpumask_bits(mask)[0] & APIC_ALL_CPUS;
 -
 -      if (!cpu_mask)
 -              return -EINVAL;
 -      *apicid = (unsigned int)cpu_mask;
 -      cpumask_bits(effmsk)[0] = cpu_mask;
 -      return 0;
 -}
 -
  /*
   * Override the generic EOI implementation with an optimized version.
   * Only called during early boot when only one CPU is active and with
@@@ -2315,27 -2322,72 +2326,27 @@@ static void __init apic_bsp_up_setup(vo
   * Returns:
   * apic_id of BSP APIC
   */
 -int __init apic_bsp_setup(bool upmode)
 +void __init apic_bsp_setup(bool upmode)
  {
 -      int id;
 -
        connect_bsp_APIC();
        if (upmode)
                apic_bsp_up_setup();
        setup_local_APIC();
  
 -      if (x2apic_mode)
 -              id = apic_read(APIC_LDR);
 -      else
 -              id = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
 -
        enable_IO_APIC();
        end_local_APIC_setup();
        irq_remap_enable_fault_handling();
        setup_IO_APIC();
 -      /* Setup local timer */
 -      x86_init.timers.setup_percpu_clockev();
 -      return id;
 -}
 -
 -/*
 - * This initializes the IO-APIC and APIC hardware if this is
 - * a UP kernel.
 - */
 -int __init APIC_init_uniprocessor(void)
 -{
 -      if (disable_apic) {
 -              pr_info("Apic disabled\n");
 -              return -1;
 -      }
 -#ifdef CONFIG_X86_64
 -      if (!boot_cpu_has(X86_FEATURE_APIC)) {
 -              disable_apic = 1;
 -              pr_info("Apic disabled by BIOS\n");
 -              return -1;
 -      }
 -#else
 -      if (!smp_found_config && !boot_cpu_has(X86_FEATURE_APIC))
 -              return -1;
 -
 -      /*
 -       * Complain if the BIOS pretends there is one.
 -       */
 -      if (!boot_cpu_has(X86_FEATURE_APIC) &&
 -          APIC_INTEGRATED(boot_cpu_apic_version)) {
 -              pr_err("BIOS bug, local APIC 0x%x not detected!...\n",
 -                      boot_cpu_physical_apicid);
 -              return -1;
 -      }
 -#endif
 -
 -      if (!smp_found_config)
 -              disable_ioapic_support();
 -
 -      default_setup_apic_routing();
 -      apic_bsp_setup(true);
 -      return 0;
  }
  
  #ifdef CONFIG_UP_LATE_INIT
  void __init up_late_init(void)
  {
 -      APIC_init_uniprocessor();
 +      if (apic_intr_mode == APIC_PIC)
 +              return;
 +
 +      /* Setup local timer */
 +      x86_init.timers.setup_percpu_clockev();
  }
  #endif
  
diff --combined arch/x86/kernel/apic/apic_noop.c
index 0285f28d531acc899a1fdd365eba77bc66e91c46,c8d211277315e6b1a34172160578f8091def26a3..7b659c4480c91a680466cc961cee68754c633ada
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * NOOP APIC driver.
   *
@@@ -83,6 -84,20 +84,6 @@@ static int noop_apic_id_registered(void
        return physid_isset(0, phys_cpu_present_map);
  }
  
 -static const struct cpumask *noop_target_cpus(void)
 -{
 -      /* only BSP here */
 -      return cpumask_of(0);
 -}
 -
 -static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask,
 -                                        const struct cpumask *mask)
 -{
 -      if (cpu != 0)
 -              pr_warning("APIC: Vector allocated for non-BSP cpu\n");
 -      cpumask_copy(retmask, cpumask_of(cpu));
 -}
 -
  static u32 noop_apic_read(u32 reg)
  {
        WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
@@@ -94,13 -109,6 +95,13 @@@ static void noop_apic_write(u32 reg, u3
        WARN_ON_ONCE(boot_cpu_has(X86_FEATURE_APIC) && !disable_apic);
  }
  
 +#ifdef CONFIG_X86_32
 +static int noop_x86_32_early_logical_apicid(int cpu)
 +{
 +      return BAD_APICID;
 +}
 +#endif
 +
  struct apic apic_noop __ro_after_init = {
        .name                           = "noop",
        .probe                          = noop_probe,
        /* logical delivery broadcast to all CPUs: */
        .irq_dest_mode                  = 1,
  
 -      .target_cpus                    = noop_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = default_check_apicid_used,
  
 -      .vector_allocation_domain       = noop_vector_allocation_domain,
        .init_apic_ldr                  = noop_init_apic_ldr,
  
        .ioapic_phys_id_map             = default_ioapic_phys_id_map,
        .get_apic_id                    = noop_get_apic_id,
        .set_apic_id                    = NULL,
  
 -      .cpu_mask_to_apicid             = flat_cpu_mask_to_apicid,
 +      .calc_dest_apicid               = apic_flat_calc_apicid,
  
        .send_IPI                       = noop_send_IPI,
        .send_IPI_mask                  = noop_send_IPI_mask,
diff --combined arch/x86/kernel/apic/bigsmp_32.c
index 7b754c513fa5b409377e4e6f0eb64b469d16e121,e12fbcfc95715586c9b3dc8840321fe1142dde46..afee386ff711e95132dd5c01e79317ac22bd9a56
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
   *
@@@ -26,9 -27,9 +27,9 @@@ static int bigsmp_apic_id_registered(vo
        return 1;
  }
  
 -static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
 +static bool bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
  {
 -      return 0;
 +      return false;
  }
  
  static int bigsmp_early_logical_apicid(int cpu)
@@@ -154,10 -155,12 +155,10 @@@ static struct apic apic_bigsmp __ro_aft
        /* phys delivery to target CPU: */
        .irq_dest_mode                  = 0,
  
 -      .target_cpus                    = default_target_cpus,
        .disable_esr                    = 1,
        .dest_logical                   = 0,
        .check_apicid_used              = bigsmp_check_apicid_used,
  
 -      .vector_allocation_domain       = default_vector_allocation_domain,
        .init_apic_ldr                  = bigsmp_init_apic_ldr,
  
        .ioapic_phys_id_map             = bigsmp_ioapic_phys_id_map,
        .get_apic_id                    = bigsmp_get_apic_id,
        .set_apic_id                    = NULL,
  
 -      .cpu_mask_to_apicid             = default_cpu_mask_to_apicid,
 +      .calc_dest_apicid               = apic_default_calc_apicid,
  
        .send_IPI                       = default_send_IPI_single_phys,
        .send_IPI_mask                  = default_send_IPI_mask_sequence_phys,
diff --combined arch/x86/kernel/apic/io_apic.c
index 18c6a486158649fd460c357b82dc2fa4f29743aa,3b89b27945fffc8f7c9d8dd28f9ed787158ad833..201579dc52428edb5c3989102a432c14799d1e1f
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   *    Intel IO-APIC support for multi-Pentium hosts.
   *
@@@ -1013,7 -1014,6 +1014,7 @@@ static int alloc_isa_irq_from_domain(st
                                          info->ioapic_pin))
                        return -ENOMEM;
        } else {
 +              info->flags |= X86_IRQ_ALLOC_LEGACY;
                irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true,
                                              NULL);
                if (irq >= 0) {
@@@ -1586,43 -1586,6 +1587,43 @@@ static int __init notimercheck(char *s
  }
  __setup("no_timer_check", notimercheck);
  
 +static void __init delay_with_tsc(void)
 +{
 +      unsigned long long start, now;
 +      unsigned long end = jiffies + 4;
 +
 +      start = rdtsc();
 +
 +      /*
 +       * We don't know the TSC frequency yet, but waiting for
 +       * 40000000000/HZ TSC cycles is safe:
 +       * 4 GHz == 10 jiffies
 +       * 1 GHz == 40 jiffies
 +       */
 +      do {
 +              rep_nop();
 +              now = rdtsc();
 +      } while ((now - start) < 40000000000UL / HZ &&
 +              time_before_eq(jiffies, end));
 +}
 +
 +static void __init delay_without_tsc(void)
 +{
 +      unsigned long end = jiffies + 4;
 +      int band = 1;
 +
 +      /*
 +       * We don't know any frequency yet, but waiting for
 +       * 40940000000/HZ cycles is safe:
 +       * 4 GHz == 10 jiffies
 +       * 1 GHz == 40 jiffies
 +       * 1 << 1 + 1 << 2 +...+ 1 << 11 = 4094
 +       */
 +      do {
 +              __delay(((1U << band++) * 10000000UL) / HZ);
 +      } while (band < 12 && time_before_eq(jiffies, end));
 +}
 +
  /*
   * There is a nasty bug in some older SMP boards, their mptable lies
   * about the timer IRQ. We do the following to work around the situation:
@@@ -1641,12 -1604,8 +1642,12 @@@ static int __init timer_irq_works(void
  
        local_save_flags(flags);
        local_irq_enable();
 -      /* Let ten ticks pass... */
 -      mdelay((10 * 1000) / HZ);
 +
 +      if (boot_cpu_has(X86_FEATURE_TSC))
 +              delay_with_tsc();
 +      else
 +              delay_without_tsc();
 +
        local_irq_restore(flags);
  
        /*
@@@ -1862,36 -1821,26 +1863,36 @@@ static void ioapic_ir_ack_level(struct 
        eoi_ioapic_pin(data->entry.vector, data);
  }
  
 +static void ioapic_configure_entry(struct irq_data *irqd)
 +{
 +      struct mp_chip_data *mpd = irqd->chip_data;
 +      struct irq_cfg *cfg = irqd_cfg(irqd);
 +      struct irq_pin_list *entry;
 +
 +      /*
 +       * Only update when the parent is the vector domain, don't touch it
 +       * if the parent is the remapping domain. Check the installed
 +       * ioapic chip to verify that.
 +       */
 +      if (irqd->chip == &ioapic_chip) {
 +              mpd->entry.dest = cfg->dest_apicid;
 +              mpd->entry.vector = cfg->vector;
 +      }
 +      for_each_irq_pin(entry, mpd->irq_2_pin)
 +              __ioapic_write_entry(entry->apic, entry->pin, mpd->entry);
 +}
 +
  static int ioapic_set_affinity(struct irq_data *irq_data,
                               const struct cpumask *mask, bool force)
  {
        struct irq_data *parent = irq_data->parent_data;
 -      struct mp_chip_data *data = irq_data->chip_data;
 -      struct irq_pin_list *entry;
 -      struct irq_cfg *cfg;
        unsigned long flags;
        int ret;
  
        ret = parent->chip->irq_set_affinity(parent, mask, force);
        raw_spin_lock_irqsave(&ioapic_lock, flags);
 -      if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
 -              cfg = irqd_cfg(irq_data);
 -              data->entry.dest = cfg->dest_apicid;
 -              data->entry.vector = cfg->vector;
 -              for_each_irq_pin(entry, data->irq_2_pin)
 -                      __ioapic_write_entry(entry->apic, entry->pin,
 -                                           data->entry);
 -      }
 +      if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE)
 +              ioapic_configure_entry(irq_data);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
  
        return ret;
@@@ -2148,7 -2097,7 +2149,7 @@@ static inline void __init check_timer(v
                                unmask_ioapic_irq(irq_get_irq_data(0));
                }
                irq_domain_deactivate_irq(irq_data);
 -              irq_domain_activate_irq(irq_data);
 +              irq_domain_activate_irq(irq_data, false);
                if (timer_irq_works()) {
                        if (disable_timer_pin_1 > 0)
                                clear_IO_APIC_pin(0, pin1);
                 */
                replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
                irq_domain_deactivate_irq(irq_data);
 -              irq_domain_activate_irq(irq_data);
 +              irq_domain_activate_irq(irq_data, false);
                legacy_pic->unmask(0);
                if (timer_irq_works()) {
                        apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@@ -2564,9 -2513,52 +2565,9 @@@ int acpi_get_override_irq(u32 gsi, int 
  }
  
  /*
 - * This function currently is only a helper for the i386 smp boot process where
 - * we need to reprogram the ioredtbls to cater for the cpus which have come online
 - * so mask in all cases should simply be apic->target_cpus()
 + * This function updates target affinity of IOAPIC interrupts to include
 + * the CPUs which came online during SMP bringup.
   */
 -#ifdef CONFIG_SMP
 -void __init setup_ioapic_dest(void)
 -{
 -      int pin, ioapic, irq, irq_entry;
 -      const struct cpumask *mask;
 -      struct irq_desc *desc;
 -      struct irq_data *idata;
 -      struct irq_chip *chip;
 -
 -      if (skip_ioapic_setup == 1)
 -              return;
 -
 -      for_each_ioapic_pin(ioapic, pin) {
 -              irq_entry = find_irq_entry(ioapic, pin, mp_INT);
 -              if (irq_entry == -1)
 -                      continue;
 -
 -              irq = pin_2_irq(irq_entry, ioapic, pin, 0);
 -              if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
 -                      continue;
 -
 -              desc = irq_to_desc(irq);
 -              raw_spin_lock_irq(&desc->lock);
 -              idata = irq_desc_get_irq_data(desc);
 -
 -              /*
 -               * Honour affinities which have been set in early boot
 -               */
 -              if (!irqd_can_balance(idata) || irqd_affinity_was_set(idata))
 -                      mask = irq_data_get_affinity_mask(idata);
 -              else
 -                      mask = apic->target_cpus();
 -
 -              chip = irq_data_get_irq_chip(idata);
 -              /* Might be lapic_chip for irq 0 */
 -              if (chip->irq_set_affinity)
 -                      chip->irq_set_affinity(idata, mask, false);
 -              raw_spin_unlock_irq(&desc->lock);
 -      }
 -}
 -#endif
 -
  #define IOAPIC_RESOURCE_NAME_SIZE 11
  
  static struct resource *ioapic_resources;
@@@ -2986,15 -2978,17 +2987,15 @@@ void mp_irqdomain_free(struct irq_domai
        irq_domain_free_irqs_top(domain, virq, nr_irqs);
  }
  
 -void mp_irqdomain_activate(struct irq_domain *domain,
 -                         struct irq_data *irq_data)
 +int mp_irqdomain_activate(struct irq_domain *domain,
 +                        struct irq_data *irq_data, bool early)
  {
        unsigned long flags;
 -      struct irq_pin_list *entry;
 -      struct mp_chip_data *data = irq_data->chip_data;
  
        raw_spin_lock_irqsave(&ioapic_lock, flags);
 -      for_each_irq_pin(entry, data->irq_2_pin)
 -              __ioapic_write_entry(entry->apic, entry->pin, data->entry);
 +      ioapic_configure_entry(irq_data);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 +      return 0;
  }
  
  void mp_irqdomain_deactivate(struct irq_domain *domain,
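
In timer_irq_works(), mdelay() is replaced because it depends on a calibrated delay loop that is not trustworthy this early in boot. The TSC bound is safe by simple arithmetic: waiting N = 40000000000/HZ cycles takes N/rate seconds, so the spin terminates after bounded wall time even when the timer is dead and jiffies never advance. A worked check of the figures in the comment above:

	/*
	 * cycles = 40000000000 / HZ
	 * at 4 GHz: (4e10/HZ) / 4e9 = 10/HZ s = 10 jiffy periods
	 * at 1 GHz: (4e10/HZ) / 1e9 = 40/HZ s = 40 jiffy periods
	 * Either way the wait comfortably covers the 4-jiffy window
	 * that timer_irq_works() observes.
	 */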
diff --combined arch/x86/kernel/apic/x2apic_cluster.c
index 6050c5364bdcca8ec528b607e1c68d33bd130caf,e216cf3d64d2e8432589c6b2fdfaca1b33765bf8..622f13ca8a943c270e70df6100561ce4c56d7db2
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  #include <linux/threads.h>
  #include <linux/cpumask.h>
  #include <linux/string.h>
@@@ -8,24 -9,22 +9,24 @@@
  #include <linux/cpu.h>
  
  #include <asm/smp.h>
 -#include <asm/x2apic.h>
 +#include "x2apic.h"
 +
 +struct cluster_mask {
 +      unsigned int    clusterid;
 +      int             node;
 +      struct cpumask  mask;
 +};
  
  static DEFINE_PER_CPU(u32, x86_cpu_to_logical_apicid);
 -static DEFINE_PER_CPU(cpumask_var_t, cpus_in_cluster);
  static DEFINE_PER_CPU(cpumask_var_t, ipi_mask);
 +static DEFINE_PER_CPU(struct cluster_mask *, cluster_masks);
 +static struct cluster_mask *cluster_hotplug_mask;
  
  static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
  {
        return x2apic_enabled();
  }
  
 -static inline u32 x2apic_cluster(int cpu)
 -{
 -      return per_cpu(x86_cpu_to_logical_apicid, cpu) >> 16;
 -}
 -
  static void x2apic_send_IPI(int cpu, int vector)
  {
        u32 dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
  static void
  __x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
  {
 -      struct cpumask *cpus_in_cluster_ptr;
 -      struct cpumask *ipi_mask_ptr;
 -      unsigned int cpu, this_cpu;
 +      unsigned int cpu, clustercpu;
 +      struct cpumask *tmpmsk;
        unsigned long flags;
        u32 dest;
  
        x2apic_wrmsr_fence();
 -
        local_irq_save(flags);
  
 -      this_cpu = smp_processor_id();
 +      tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
 +      cpumask_copy(tmpmsk, mask);
 +      /* If IPI should not be sent to self, clear current CPU */
 +      if (apic_dest != APIC_DEST_ALLINC)
 +              cpumask_clear_cpu(smp_processor_id(), tmpmsk);
  
 -      /*
 -       * We are to modify mask, so we need an own copy
 -       * and be sure it's manipulated with irq off.
 -       */
 -      ipi_mask_ptr = this_cpu_cpumask_var_ptr(ipi_mask);
 -      cpumask_copy(ipi_mask_ptr, mask);
 -
 -      /*
 -       * The idea is to send one IPI per cluster.
 -       */
 -      for_each_cpu(cpu, ipi_mask_ptr) {
 -              unsigned long i;
 +      /* Collapse cpus in a cluster so a single IPI per cluster is sent */
 +      for_each_cpu(cpu, tmpmsk) {
 +              struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);
  
 -              cpus_in_cluster_ptr = per_cpu(cpus_in_cluster, cpu);
                dest = 0;
 -
 -              /* Collect cpus in cluster. */
 -              for_each_cpu_and(i, ipi_mask_ptr, cpus_in_cluster_ptr) {
 -                      if (apic_dest == APIC_DEST_ALLINC || i != this_cpu)
 -                              dest |= per_cpu(x86_cpu_to_logical_apicid, i);
 -              }
 +              for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
 +                      dest |= per_cpu(x86_cpu_to_logical_apicid, clustercpu);
  
                if (!dest)
                        continue;
  
                __x2apic_send_IPI_dest(dest, vector, apic->dest_logical);
 -              /*
 -               * Cluster sibling cpus should be discared now so
 -               * we would not send IPI them second time.
 -               */
 -              cpumask_andnot(ipi_mask_ptr, ipi_mask_ptr, cpus_in_cluster_ptr);
 +              /* Remove cluster CPUs from tmpmask */
 +              cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
        }
  
        local_irq_restore(flags);
@@@ -91,90 -105,125 +92,90 @@@ static void x2apic_send_IPI_all(int vec
        __x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
  }
  
 -static int
 -x2apic_cpu_mask_to_apicid(const struct cpumask *mask, struct irq_data *irqdata,
 -                        unsigned int *apicid)
 +static u32 x2apic_calc_apicid(unsigned int cpu)
  {
 -      struct cpumask *effmsk = irq_data_get_effective_affinity_mask(irqdata);
 -      unsigned int cpu;
 -      u32 dest = 0;
 -      u16 cluster;
 -
 -      cpu = cpumask_first(mask);
 -      if (cpu >= nr_cpu_ids)
 -              return -EINVAL;
 -
 -      dest = per_cpu(x86_cpu_to_logical_apicid, cpu);
 -      cluster = x2apic_cluster(cpu);
 -
 -      cpumask_clear(effmsk);
 -      for_each_cpu(cpu, mask) {
 -              if (cluster != x2apic_cluster(cpu))
 -                      continue;
 -              dest |= per_cpu(x86_cpu_to_logical_apicid, cpu);
 -              cpumask_set_cpu(cpu, effmsk);
 -      }
 -
 -      *apicid = dest;
 -      return 0;
 +      return per_cpu(x86_cpu_to_logical_apicid, cpu);
  }
  
  static void init_x2apic_ldr(void)
  {
 -      unsigned int this_cpu = smp_processor_id();
 +      struct cluster_mask *cmsk = this_cpu_read(cluster_masks);
 +      u32 cluster, apicid = apic_read(APIC_LDR);
        unsigned int cpu;
  
 -      per_cpu(x86_cpu_to_logical_apicid, this_cpu) = apic_read(APIC_LDR);
 +      this_cpu_write(x86_cpu_to_logical_apicid, apicid);
 +
 +      if (cmsk)
 +              goto update;
  
 -      cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, this_cpu));
 +      cluster = apicid >> 16;
        for_each_online_cpu(cpu) {
 -              if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
 -                      continue;
 -              cpumask_set_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
 -              cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
 +              cmsk = per_cpu(cluster_masks, cpu);
 +              /* Matching cluster found. Link and update it. */
 +              if (cmsk && cmsk->clusterid == cluster)
 +                      goto update;
        }
 +      cmsk = cluster_hotplug_mask;
 +      cluster_hotplug_mask = NULL;
 +update:
 +      this_cpu_write(cluster_masks, cmsk);
 +      cpumask_set_cpu(smp_processor_id(), &cmsk->mask);
  }
  
 -/*
 - * At CPU state changes, update the x2apic cluster sibling info.
 - */
 -static int x2apic_prepare_cpu(unsigned int cpu)
 +static int alloc_clustermask(unsigned int cpu, int node)
  {
 -      if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL))
 -              return -ENOMEM;
 +      if (per_cpu(cluster_masks, cpu))
 +              return 0;
 +      /*
 +       * If a hotplug spare mask exists, check whether it's on the right
 +       * node. If not, free it and allocate a new one.
 +       */
 +      if (cluster_hotplug_mask) {
 +              if (cluster_hotplug_mask->node == node)
 +                      return 0;
 +              kfree(cluster_hotplug_mask);
 +      }
  
 -      if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) {
 -              free_cpumask_var(per_cpu(cpus_in_cluster, cpu));
 +      cluster_hotplug_mask = kzalloc_node(sizeof(*cluster_hotplug_mask),
 +                                          GFP_KERNEL, node);
 +      if (!cluster_hotplug_mask)
                return -ENOMEM;
 -      }
 +      cluster_hotplug_mask->node = node;
 +      return 0;
 +}
  
 +static int x2apic_prepare_cpu(unsigned int cpu)
 +{
 +      if (alloc_clustermask(cpu, cpu_to_node(cpu)) < 0)
 +              return -ENOMEM;
 +      if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL))
 +              return -ENOMEM;
        return 0;
  }
  
 -static int x2apic_dead_cpu(unsigned int this_cpu)
 +static int x2apic_dead_cpu(unsigned int dead_cpu)
  {
 -      int cpu;
 +      struct cluster_mask *cmsk = per_cpu(cluster_masks, dead_cpu);
  
 -      for_each_online_cpu(cpu) {
 -              if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu))
 -                      continue;
 -              cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu));
 -              cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu));
 -      }
 -      free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu));
 -      free_cpumask_var(per_cpu(ipi_mask, this_cpu));
 +      cpumask_clear_cpu(dead_cpu, &cmsk->mask);
 +      free_cpumask_var(per_cpu(ipi_mask, dead_cpu));
        return 0;
  }
  
  static int x2apic_cluster_probe(void)
  {
 -      int cpu = smp_processor_id();
 -      int ret;
 -
        if (!x2apic_mode)
                return 0;
  
 -      ret = cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
 -                              x2apic_prepare_cpu, x2apic_dead_cpu);
 -      if (ret < 0) {
 +      if (cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "x86/x2apic:prepare",
 +                            x2apic_prepare_cpu, x2apic_dead_cpu) < 0) {
                pr_err("Failed to register X2APIC_PREPARE\n");
                return 0;
        }
 -      cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu));
 +      init_x2apic_ldr();
        return 1;
  }
  
 -static const struct cpumask *x2apic_cluster_target_cpus(void)
 -{
 -      return cpu_all_mask;
 -}
 -
 -/*
 - * Each x2apic cluster is an allocation domain.
 - */
 -static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
 -                                           const struct cpumask *mask)
 -{
 -      /*
 -       * To minimize vector pressure, default case of boot, device bringup
 -       * etc will use a single cpu for the interrupt destination.
 -       *
 -       * On explicit migration requests coming from irqbalance etc,
 -       * interrupts will be routed to the x2apic cluster (cluster-id
 -       * derived from the first cpu in the mask) members specified
 -       * in the mask.
 -       */
 -      if (mask == x2apic_cluster_target_cpus())
 -              cpumask_copy(retmask, cpumask_of(cpu));
 -      else
 -              cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
 -}
 -
  static struct apic apic_x2apic_cluster __ro_after_init = {
  
        .name                           = "cluster x2apic",
        .irq_delivery_mode              = dest_LowestPrio,
        .irq_dest_mode                  = 1, /* logical */
  
 -      .target_cpus                    = x2apic_cluster_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = APIC_DEST_LOGICAL,
        .check_apicid_used              = NULL,
  
 -      .vector_allocation_domain       = cluster_vector_allocation_domain,
        .init_apic_ldr                  = init_x2apic_ldr,
  
        .ioapic_phys_id_map             = NULL,
        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = x2apic_set_apic_id,
  
 -      .cpu_mask_to_apicid             = x2apic_cpu_mask_to_apicid,
 +      .calc_dest_apicid               = x2apic_calc_apicid,
  
        .send_IPI                       = x2apic_send_IPI,
        .send_IPI_mask                  = x2apic_send_IPI_mask,
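
The per-CPU cpus_in_cluster masks are replaced by shared struct cluster_mask objects, so the IPI fan-out ORs the logical IDs of all targets in a cluster and then drops the whole cluster from the scratch mask in one step. A condensed sketch of that loop's shape, using the identifiers from the hunks above (the wrapper function is hypothetical):

	/* Sketch: one IPI per x2APIC cluster. */
	static void example_fanout(struct cpumask *tmpmsk, int vector)
	{
		unsigned int cpu, clustercpu;

		for_each_cpu(cpu, tmpmsk) {
			struct cluster_mask *cmsk = per_cpu(cluster_masks, cpu);
			u32 dest = 0;

			for_each_cpu_and(clustercpu, tmpmsk, &cmsk->mask)
				dest |= per_cpu(x86_cpu_to_logical_apicid,
						clustercpu);
			if (dest)
				__x2apic_send_IPI_dest(dest, vector,
						       apic->dest_logical);
			/* Never IPI the same cluster twice. */
			cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
		}
	}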
diff --combined arch/x86/kernel/apic/x2apic_phys.c
index 17c2c5b0b7b9c7d087ce993e69573ca0b8dfd699,b94d35320f85e7bcc5d20c4461b43bc848e191b4..f8d9d69994e619f3abb90dbcb135a130cbe257f5
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  #include <linux/threads.h>
  #include <linux/cpumask.h>
  #include <linux/string.h>
@@@ -6,8 -7,7 +7,8 @@@
  #include <linux/dmar.h>
  
  #include <asm/smp.h>
 -#include <asm/x2apic.h>
 +#include <asm/ipi.h>
 +#include "x2apic.h"
  
  int x2apic_phys;
  
@@@ -99,43 -99,6 +100,43 @@@ static int x2apic_phys_probe(void
        return apic == &apic_x2apic_phys;
  }
  
 +/* Common x2apic functions, also used by x2apic_cluster */
 +int x2apic_apic_id_valid(int apicid)
 +{
 +      return 1;
 +}
 +
 +int x2apic_apic_id_registered(void)
 +{
 +      return 1;
 +}
 +
 +void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest)
 +{
 +      unsigned long cfg = __prepare_ICR(0, vector, dest);
 +      native_x2apic_icr_write(cfg, apicid);
 +}
 +
 +unsigned int x2apic_get_apic_id(unsigned long id)
 +{
 +      return id;
 +}
 +
 +u32 x2apic_set_apic_id(unsigned int id)
 +{
 +      return id;
 +}
 +
 +int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
 +{
 +      return initial_apicid >> index_msb;
 +}
 +
 +void x2apic_send_IPI_self(int vector)
 +{
 +      apic_write(APIC_SELF_IPI, vector);
 +}
 +
  static struct apic apic_x2apic_phys __ro_after_init = {
  
        .name                           = "physical x2apic",
        .irq_delivery_mode              = dest_Fixed,
        .irq_dest_mode                  = 0, /* physical */
  
 -      .target_cpus                    = online_target_cpus,
        .disable_esr                    = 0,
        .dest_logical                   = 0,
        .check_apicid_used              = NULL,
  
 -      .vector_allocation_domain       = default_vector_allocation_domain,
        .init_apic_ldr                  = init_x2apic_ldr,
  
        .ioapic_phys_id_map             = NULL,
        .get_apic_id                    = x2apic_get_apic_id,
        .set_apic_id                    = x2apic_set_apic_id,
  
 -      .cpu_mask_to_apicid             = default_cpu_mask_to_apicid,
 +      .calc_dest_apicid               = apic_default_calc_apicid,
  
        .send_IPI                       = x2apic_send_IPI,
        .send_IPI_mask                  = x2apic_send_IPI_mask,
diff --combined arch/x86/kernel/i8259.c
index 317c5b38a3185ccb1ecf7152d151bf291af34a47,8f5cb2c7060cfc29b46ec24669ec418dfdf0c0df..86c4439f9d74963b7d9317e6a487ed0d7c085765
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  #include <linux/linkage.h>
  #include <linux/errno.h>
  #include <linux/signal.h>
@@@ -113,7 -114,6 +114,7 @@@ static void make_8259A_irq(unsigned in
        io_apic_irqs &= ~(1<<irq);
        irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
        enable_irq(irq);
 +      lapic_assign_legacy_vector(irq, true);
  }
  
  /*
diff --combined arch/x86/kernel/irqinit.c
index c7fb57505f2de9816cbbba525a22ac53ae6d5645,1e4094eba15e95e6f9b3c6f19de6d748bb894dd1..8da3e909e967dd1cd2c55955f7e5445574a07a24
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  #include <linux/linkage.h>
  #include <linux/errno.h>
  #include <linux/signal.h>
@@@ -60,6 -61,9 +61,6 @@@ void __init init_ISA_irqs(void
        struct irq_chip *chip = legacy_pic->chip;
        int i;
  
 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
 -      init_bsp_APIC();
 -#endif
        legacy_pic->init(0);
  
        for (i = 0; i < nr_legacy_irqs(); i++)
@@@ -90,7 -94,6 +91,7 @@@ void __init native_init_IRQ(void
        x86_init.irqs.pre_vector_init();
  
        idt_setup_apic_and_irq_gates();
 +      lapic_assign_system_vectors();
  
        if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
                setup_irq(2, &irq2);
diff --combined arch/x86/kernel/time.c
index 3ceb834233c89b48ea7a858c9331fcf3c6c9bb35,879af864d99afd6c8645f0d74fe71bf6a2bade07..749d189f8cd4675de3267310e38217a8082c0485
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   *  Copyright (c) 1991,1992,1995  Linus Torvalds
   *  Copyright (c) 1994  Alan Modra
@@@ -84,11 -85,6 +85,11 @@@ void __init hpet_time_init(void
  static __init void x86_late_time_init(void)
  {
        x86_init.timers.timer_init();
 +      /*
 +       * After PIT/HPET timers init, select and setup
 +       * the final interrupt mode for delivering IRQs.
 +       */
 +      x86_init.irqs.intr_mode_init();
        tsc_init();
  }
  
diff --combined arch/x86/xen/apic.c
index 4ba3fd7039b0220ee7564ba202d50235cd9ca3a1,30434b8708f206d62888b1b9bd46466a25af7861..6b830d4cb4c8e8e78c44dd87a12f642234533b4e
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  #include <linux/init.h>
  
  #include <asm/x86_init.h>
@@@ -30,7 -31,7 +31,7 @@@ static unsigned int xen_io_apic_read(un
        return 0xfd;
  }
  
 -static unsigned long xen_set_apic_id(unsigned int x)
 +static u32 xen_set_apic_id(unsigned int x)
  {
        WARN_ON(1);
        return x;
@@@ -160,10 -161,12 +161,10 @@@ static struct apic xen_pv_apic = 
        /* .irq_delivery_mode - used in native_compose_msi_msg only */
        /* .irq_dest_mode     - used in native_compose_msi_msg only */
  
 -      .target_cpus                    = default_target_cpus,
        .disable_esr                    = 0,
        /* .dest_logical      -  default_send_IPI_ use it but we use our own. */
        .check_apicid_used              = default_check_apicid_used, /* Used on 32-bit */
  
 -      .vector_allocation_domain       = flat_vector_allocation_domain,
        .init_apic_ldr                  = xen_noop, /* setup_local_APIC calls it */
  
        .ioapic_phys_id_map             = default_ioapic_phys_id_map, /* Used on 32-bit */
        .get_apic_id                    = xen_get_apic_id,
        .set_apic_id                    = xen_set_apic_id, /* Can be NULL on 32-bit. */
  
 -      .cpu_mask_to_apicid             = flat_cpu_mask_to_apicid,
 +      .calc_dest_apicid               = apic_flat_calc_apicid,
  
  #ifdef CONFIG_SMP
        .send_IPI_mask                  = xen_send_IPI_mask,
diff --combined arch/x86/xen/enlighten_pv.c
index 73f809a6ca87f079c9e79da60cf48fb5b6720e96,d4396e27b1fb755994fc839a08b86e45e625db42..7b3b17f2ab5048c266b42836f8fff1bc17acef64
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * Core of Xen paravirt_ops implementation.
   *
@@@ -1230,7 -1231,6 +1231,7 @@@ asmlinkage __visible void __init xen_st
        x86_platform.get_nmi_reason = xen_get_nmi_reason;
  
        x86_init.resources.memory_setup = xen_memory_setup;
 +      x86_init.irqs.intr_mode_init    = x86_init_noop;
        x86_init.oem.arch_setup = xen_arch_setup;
        x86_init.oem.banner = xen_banner;
  
diff --combined drivers/iommu/amd_iommu.c
index a78fa34f113a90ac091531e66875856e03a0a05f,8e8874d23717ab5120c87f180146562be3e03ef6..9c848e36f20904b0397bbb643906fbc3474c70c2
@@@ -2773,14 -2773,16 +2773,16 @@@ int __init amd_iommu_init_api(void
  
  int __init amd_iommu_init_dma_ops(void)
  {
-       swiotlb        = iommu_pass_through ? 1 : 0;
+       swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
        iommu_detected = 1;
  
        /*
         * In case we don't initialize SWIOTLB (actually the common case
-        * when AMD IOMMU is enabled), make sure there are global
-        * dma_ops set as a fall-back for devices not handled by this
-        * driver (for example non-PCI devices).
+        * when AMD IOMMU is enabled and SME is not active), make sure there
+        * are global dma_ops set as a fall-back for devices not handled by
+        * this driver (for example non-PCI devices). When SME is active,
+        * make sure that swiotlb variable remains set so the global dma_ops
+        * continue to be SWIOTLB.
         */
        if (!swiotlb)
                dma_ops = &nommu_dma_ops;
@@@ -3046,6 -3048,7 +3048,7 @@@ static size_t amd_iommu_unmap(struct io
        mutex_unlock(&domain->api_lock);
  
        domain_flush_tlb_pde(domain);
+       domain_flush_complete(domain);
  
        return unmap_size;
  }
@@@ -4170,26 -4173,16 +4173,26 @@@ static void irq_remapping_free(struct i
        irq_domain_free_irqs_common(domain, virq, nr_irqs);
  }
  
 -static void irq_remapping_activate(struct irq_domain *domain,
 -                                 struct irq_data *irq_data)
 +static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
 +                             struct amd_ir_data *ir_data,
 +                             struct irq_2_irte *irte_info,
 +                             struct irq_cfg *cfg);
 +
 +static int irq_remapping_activate(struct irq_domain *domain,
 +                                struct irq_data *irq_data, bool early)
  {
        struct amd_ir_data *data = irq_data->chip_data;
        struct irq_2_irte *irte_info = &data->irq_2_irte;
        struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid];
 +      struct irq_cfg *cfg = irqd_cfg(irq_data);
  
 -      if (iommu)
 -              iommu->irte_ops->activate(data->entry, irte_info->devid,
 -                                        irte_info->index);
 +      if (!iommu)
 +              return 0;
 +
 +      iommu->irte_ops->activate(data->entry, irte_info->devid,
 +                                irte_info->index);
 +      amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg);
 +      return 0;
  }
  
  static void irq_remapping_deactivate(struct irq_domain *domain,
@@@ -4276,22 -4269,6 +4279,22 @@@ static int amd_ir_set_vcpu_affinity(str
        return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data);
  }
  
 +
 +static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu,
 +                             struct amd_ir_data *ir_data,
 +                             struct irq_2_irte *irte_info,
 +                             struct irq_cfg *cfg)
 +{
 +
 +      /*
 +       * Atomically updates the IRTE with the new destination, vector
 +       * and flushes the interrupt entry cache.
 +       */
 +      iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
 +                                    irte_info->index, cfg->vector,
 +                                    cfg->dest_apicid);
 +}
 +
  static int amd_ir_set_affinity(struct irq_data *data,
                               const struct cpumask *mask, bool force)
  {
        if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
                return ret;
  
 -      /*
 -       * Atomically updates the IRTE with the new destination, vector
 -       * and flushes the interrupt entry cache.
 -       */
 -      iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid,
 -                          irte_info->index, cfg->vector, cfg->dest_apicid);
 -
 +      amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg);
        /*
         * After this point, all the interrupts will start arriving
         * at the new destination. So, time to cleanup the previous
diff --combined drivers/iommu/intel_irq_remapping.c
index e274d9d12ba4fcd9cc7ede550ae85bc17b7f22b4,25842b566c39c1497d1cecd91b6af8a079492185..76a193c7fcfc69b012d3e6e1f1dd246ab8d5acc8
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  
  #define pr_fmt(fmt)     "DMAR-IR: " fmt
  
@@@ -1121,24 -1122,6 +1122,24 @@@ struct irq_remap_ops intel_irq_remap_op
        .get_irq_domain         = intel_get_irq_domain,
  };
  
 +static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
 +{
 +      struct intel_ir_data *ir_data = irqd->chip_data;
 +      struct irte *irte = &ir_data->irte_entry;
 +      struct irq_cfg *cfg = irqd_cfg(irqd);
 +
 +      /*
 +       * Atomically updates the IRTE with the new destination, vector
 +       * and flushes the interrupt entry cache.
 +       */
 +      irte->vector = cfg->vector;
 +      irte->dest_id = IRTE_DEST(cfg->dest_apicid);
 +
 +      /* Update the hardware only if the interrupt is in remapped mode. */
 +      if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
 +              modify_irte(&ir_data->irq_2_iommu, irte);
 +}
 +
  /*
   * Migrate the IO-APIC irq in the presence of intr-remapping.
   *
@@@ -1157,15 -1140,27 +1158,15 @@@ static in
  intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
                      bool force)
  {
 -      struct intel_ir_data *ir_data = data->chip_data;
 -      struct irte *irte = &ir_data->irte_entry;
 -      struct irq_cfg *cfg = irqd_cfg(data);
        struct irq_data *parent = data->parent_data;
 +      struct irq_cfg *cfg = irqd_cfg(data);
        int ret;
  
        ret = parent->chip->irq_set_affinity(parent, mask, force);
        if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
                return ret;
  
 -      /*
 -       * Atomically updates the IRTE with the new destination, vector
 -       * and flushes the interrupt entry cache.
 -       */
 -      irte->vector = cfg->vector;
 -      irte->dest_id = IRTE_DEST(cfg->dest_apicid);
 -
 -      /* Update the hardware only if the interrupt is in remapped mode. */
 -      if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
 -              modify_irte(&ir_data->irq_2_iommu, irte);
 -
 +      intel_ir_reconfigure_irte(data, false);
        /*
         * After this point, all the interrupts will start arriving
         * at the new destination. So, time to cleanup the previous
@@@ -1395,11 -1390,12 +1396,11 @@@ static void intel_irq_remapping_free(st
        irq_domain_free_irqs_common(domain, virq, nr_irqs);
  }
  
 -static void intel_irq_remapping_activate(struct irq_domain *domain,
 -                                       struct irq_data *irq_data)
 +static int intel_irq_remapping_activate(struct irq_domain *domain,
 +                                      struct irq_data *irq_data, bool early)
  {
 -      struct intel_ir_data *data = irq_data->chip_data;
 -
 -      modify_irte(&data->irq_2_iommu, &data->irte_entry);
 +      intel_ir_reconfigure_irte(irq_data, true);
 +      return 0;
  }
  
  static void intel_irq_remapping_deactivate(struct irq_domain *domain,
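Both IOMMU conversions above follow the same pattern: the IRTE update is factored into a single helper (amd_ir_update_irte() / intel_ir_reconfigure_irte()) called both on affinity changes and from the activate callback, since in reservation mode the vector picked at allocation time may no longer be the one in force when the interrupt is finally activated. On the Intel side the force flag distinguishes the two callers; reading the hunks above:

    /* intel_ir_set_affinity()        -> intel_ir_reconfigure_irte(data, false)
     *      affinity change: the cached IRTE is refreshed and written out.
     * intel_irq_remapping_activate() -> intel_ir_reconfigure_irte(irq_data, true)
     *      activation: the hardware is written only while the entry is in
     *      remapped mode, i.e. not yet handed over to VCPU posting.
     */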
diff --combined drivers/irqchip/irq-gic-v3-its.c
index 20e2b5fac7b99a88b5bcf48b774b0f0af9de5ae0,e88395605e32dc32960974a4e8707a30037b107b..e2339af8054c172b9f2ee06bfd4a17d8ae60b0f0
@@@ -107,6 -107,10 +107,10 @@@ struct its_node 
  
  #define ITS_ITT_ALIGN         SZ_256
  
+ /* The maximum number of VPEID bits supported by VLPI commands */
+ #define ITS_MAX_VPEID_BITS    (16)
+ #define ITS_MAX_VPEID         (1 << (ITS_MAX_VPEID_BITS))
  /* Convert page order to size in bytes */
  #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
  
@@@ -308,7 -312,7 +312,7 @@@ static void its_encode_size(struct its_
  
  static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
  {
-       its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8);
+       its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
  }
  
  static void its_encode_valid(struct its_cmd_block *cmd, int valid)
  
  static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
  {
-       its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16);
+       its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
  }
  
  static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
@@@ -358,7 -362,7 +362,7 @@@ static void its_encode_its_list(struct 
  
  static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
  {
-       its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16);
+       its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
  }
  
  static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
@@@ -1478,9 -1482,9 +1482,9 @@@ static int its_setup_baser(struct its_n
        u64 val = its_read_baser(its, baser);
        u64 esz = GITS_BASER_ENTRY_SIZE(val);
        u64 type = GITS_BASER_TYPE(val);
+       u64 baser_phys, tmp;
        u32 alloc_pages;
        void *base;
-       u64 tmp;
  
  retry_alloc_baser:
        alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
        if (!base)
                return -ENOMEM;
  
+       baser_phys = virt_to_phys(base);
+       /* Check if the physical address of the memory is above 48bits */
+       if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
+               /* 52bit PA is supported only when PageSize=64K */
+               if (psz != SZ_64K) {
+                       pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
+                       free_pages((unsigned long)base, order);
+                       return -ENXIO;
+               }
+               /* Convert 52bit PA to 48bit field */
+               baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
+       }
  retry_baser:
-       val = (virt_to_phys(base)                                |
+       val = (baser_phys                                        |
                (type << GITS_BASER_TYPE_SHIFT)                  |
                ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT)       |
                ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT)    |
  
  static bool its_parse_indirect_baser(struct its_node *its,
                                     struct its_baser *baser,
-                                    u32 psz, u32 *order)
+                                    u32 psz, u32 *order, u32 ids)
  {
        u64 tmp = its_read_baser(its, baser);
        u64 type = GITS_BASER_TYPE(tmp);
        u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
        u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
-       u32 ids = its->device_ids;
        u32 new_order = *order;
        bool indirect = false;
  
@@@ -1680,9 -1699,13 +1699,13 @@@ static int its_alloc_tables(struct its_
                        continue;
  
                case GITS_BASER_TYPE_DEVICE:
+                       indirect = its_parse_indirect_baser(its, baser,
+                                                           psz, &order,
+                                                           its->device_ids);
                case GITS_BASER_TYPE_VCPU:
                        indirect = its_parse_indirect_baser(its, baser,
-                                                           psz, &order);
+                                                           psz, &order,
+                                                           ITS_MAX_VPEID_BITS);
                        break;
                }
  
@@@ -2186,8 -2209,8 +2209,8 @@@ static int its_irq_domain_alloc(struct 
        return 0;
  }
  
 -static void its_irq_domain_activate(struct irq_domain *domain,
 -                                  struct irq_data *d)
 +static int its_irq_domain_activate(struct irq_domain *domain,
 +                                 struct irq_data *d, bool early)
  {
        struct its_device *its_dev = irq_data_get_irq_chip_data(d);
        u32 event = its_get_event_id(d);
  
        /* Map the GIC IRQ and event to the device */
        its_send_mapti(its_dev, d->hwirq, event);
 +      return 0;
  }
  
  static void its_irq_domain_deactivate(struct irq_domain *domain,
@@@ -2552,7 -2574,7 +2575,7 @@@ static struct irq_chip its_vpe_irq_chi
  
  static int its_vpe_id_alloc(void)
  {
-       return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL);
+       return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
  }
  
  static void its_vpe_id_free(u16 id)
@@@ -2679,8 -2701,8 +2702,8 @@@ static int its_vpe_irq_domain_alloc(str
        return err;
  }
  
 -static void its_vpe_irq_domain_activate(struct irq_domain *domain,
 -                                      struct irq_data *d)
 +static int its_vpe_irq_domain_activate(struct irq_domain *domain,
 +                                     struct irq_data *d, bool early)
  {
        struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
  
        vpe->col_idx = cpumask_first(cpu_online_mask);
        its_send_vmapp(vpe, true);
        its_send_vinvall(vpe);
 +      return 0;
  }
  
  static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
@@@ -2853,7 -2874,7 +2876,7 @@@ static int its_init_vpe_domain(void
                return -ENOMEM;
        }
  
-       BUG_ON(entries != vpe_proxy.dev->nr_ites);
+       BUG_ON(entries > vpe_proxy.dev->nr_ites);
  
        raw_spin_lock_init(&vpe_proxy.lock);
        vpe_proxy.next_victim = 0;
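The widened 51-bit field encodings and GITS_BASER_PHYS_52_to_48() belong to the 52-bit physical-addressing support: with 64K pages the low 16 bits of a table base are zero, so the architecture reuses register bits [15:12] to carry PA bits [51:48]. A sketch of such a conversion, under the assumption that it matches the macro in include/linux/irqchip/arm-gic-v3.h:

    /* Fold PA[51:48] of a 64K-aligned address into bits [15:12]. */
    #define GITS_BASER_PHYS_52_to_48(phys)                              \
            (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) << 12))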
diff --combined include/linux/irq.h
index fda8da7c45e738f3b9e7c5313b572b9277420bd4,4536286cc4d24bcbb64dcc6a4db6a47f399f82d5..b01d06db9101ae73b08952ba6e11abf86c1bbb6d
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_IRQ_H
  #define _LINUX_IRQ_H
  
@@@ -1009,7 -1010,7 +1010,7 @@@ void irq_gc_mask_clr_bit(struct irq_dat
  void irq_gc_unmask_enable_reg(struct irq_data *d);
  void irq_gc_ack_set_bit(struct irq_data *d);
  void irq_gc_ack_clr_bit(struct irq_data *d);
- void irq_gc_mask_disable_reg_and_ack(struct irq_data *d);
+ void irq_gc_mask_disable_and_ack_set(struct irq_data *d);
  void irq_gc_eoi(struct irq_data *d);
  int irq_gc_set_wake(struct irq_data *d, unsigned int on);
  
@@@ -1113,28 -1114,6 +1114,28 @@@ static inline u32 irq_reg_readl(struct 
                return readl(gc->reg_base + reg_offset);
  }
  
 +struct irq_matrix;
 +struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
 +                                  unsigned int alloc_start,
 +                                  unsigned int alloc_end);
 +void irq_matrix_online(struct irq_matrix *m);
 +void irq_matrix_offline(struct irq_matrix *m);
 +void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
 +int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 +void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
 +int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
 +void irq_matrix_reserve(struct irq_matrix *m);
 +void irq_matrix_remove_reserved(struct irq_matrix *m);
 +int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
 +                   bool reserved, unsigned int *mapped_cpu);
 +void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
 +                   unsigned int bit, bool managed);
 +void irq_matrix_assign(struct irq_matrix *m, unsigned int bit);
 +unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown);
 +unsigned int irq_matrix_allocated(struct irq_matrix *m);
 +unsigned int irq_matrix_reserved(struct irq_matrix *m);
 +void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind);
 +
  /* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */
  #define INVALID_HWIRQ (~0UL)
  irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu);
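These declarations are the whole public surface of the new matrix allocator (kernel/irq/matrix.c, selected by GENERIC_IRQ_MATRIX_ALLOCATOR in the Kconfig hunk). A minimal, hypothetical usage sketch — alloc_end is assumed exclusive, as the x86 caller treats it, and all my_* names are invented for illustration:

    static struct irq_matrix *my_matrix;

    static int __init my_matrix_setup(void)
    {
            /* 256 bits per CPU; dynamic allocations come from 32..235. */
            my_matrix = irq_alloc_matrix(256, 32, 236);
            if (!my_matrix)
                    return -ENOMEM;

            /* Pin a fixed, CPU-global vector outside the dynamic range. */
            irq_matrix_assign_system(my_matrix, 0xff, false);

            /* Make the boot CPU's per-CPU map available. */
            irq_matrix_online(my_matrix);
            return 0;
    }

    static int my_grab_vector(void)
    {
            unsigned int cpu;
            int bit;

            /* Claim one bit on the best-suited online CPU. */
            bit = irq_matrix_alloc(my_matrix, cpu_online_mask, false, &cpu);
            if (bit < 0)
                    return bit;

            /* ... program the device, then later release the bit ... */
            irq_matrix_free(my_matrix, cpu, bit, false);
            return 0;
    }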
diff --combined include/linux/irqdesc.h
index b55b113c049b1885b6c71c7191464a6a2d9aef0f,b6084898d33017e01edaef49893169a26c9c0164..60e3100b0809a372258b6acc045d14d6cc86a8eb
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef _LINUX_IRQDESC_H
  #define _LINUX_IRQDESC_H
  
@@@ -93,7 -94,6 +94,7 @@@ struct irq_desc 
  #endif
  #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
        struct dentry           *debugfs_file;
 +      const char              *dev_name;
  #endif
  #ifdef CONFIG_SPARSE_IRQ
        struct rcu_head         rcu;
diff --combined include/linux/irqdomain.h
index 7d0c6c1447084f57568cfc612aff107d15be8d22,b1037dfc47e40eff76735022b7582024b6d89290..0d6f05cab0fe7eba9d003f60ad65f6074e07593d
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * irq_domain - IRQ translation domains
   *
@@@ -40,7 -41,6 +41,7 @@@ struct of_device_id
  struct irq_chip;
  struct irq_data;
  struct cpumask;
 +struct seq_file;
  
  /* Number of irqs reserved for a legacy isa controller */
  #define NUM_ISA_INTERRUPTS    16
@@@ -105,21 -105,18 +106,21 @@@ struct irq_domain_ops 
        int (*xlate)(struct irq_domain *d, struct device_node *node,
                     const u32 *intspec, unsigned int intsize,
                     unsigned long *out_hwirq, unsigned int *out_type);
 -
  #ifdef        CONFIG_IRQ_DOMAIN_HIERARCHY
        /* extended V2 interfaces to support hierarchy irq_domains */
        int (*alloc)(struct irq_domain *d, unsigned int virq,
                     unsigned int nr_irqs, void *arg);
        void (*free)(struct irq_domain *d, unsigned int virq,
                     unsigned int nr_irqs);
 -      void (*activate)(struct irq_domain *d, struct irq_data *irq_data);
 +      int (*activate)(struct irq_domain *d, struct irq_data *irqd, bool early);
        void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data);
        int (*translate)(struct irq_domain *d, struct irq_fwspec *fwspec,
                         unsigned long *out_hwirq, unsigned int *out_type);
  #endif
 +#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
 +      void (*debug_show)(struct seq_file *m, struct irq_domain *d,
 +                         struct irq_data *irqd, int ind);
 +#endif
  };
  
  extern struct irq_domain_ops irq_generic_chip_ops;
@@@ -441,7 -438,7 +442,7 @@@ extern int __irq_domain_alloc_irqs(stru
                                   unsigned int nr_irqs, int node, void *arg,
                                   bool realloc, const struct cpumask *affinity);
  extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
 -extern void irq_domain_activate_irq(struct irq_data *irq_data);
 +extern int irq_domain_activate_irq(struct irq_data *irq_data, bool early);
  extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
  
  static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
@@@ -511,6 -508,8 +512,6 @@@ static inline bool irq_domain_is_msi_re
  extern bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain);
  
  #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
 -static inline void irq_domain_activate_irq(struct irq_data *data) { }
 -static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
  static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
                        unsigned int nr_irqs, int node, void *arg)
  {
@@@ -559,6 -558,8 +560,6 @@@ irq_domain_hierarchical_is_msi_remap(st
  
  #else /* CONFIG_IRQ_DOMAIN */
  static inline void irq_dispose_mapping(unsigned int virq) { }
 -static inline void irq_domain_activate_irq(struct irq_data *data) { }
 -static inline void irq_domain_deactivate_irq(struct irq_data *data) { }
  static inline struct irq_domain *irq_find_matching_fwnode(
        struct fwnode_handle *fwnode, enum irq_domain_bus_token bus_token)
  {
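With the activate callback now returning int and taking an early flag, a hierarchy driver's ops table ends up in this shape (hypothetical driver; the mydrv_* functions are placeholders, the point is the new signature):

    static int mydrv_domain_activate(struct irq_domain *d,
                                     struct irq_data *irqd, bool early)
    {
            /* 'early' is true for the allocation-time activation done for
             * reservation-mode MSI; the conversions in this merge simply
             * ignore it. Failure (e.g. -ENOSPC from an underlying vector
             * allocator) can now be reported instead of swallowed. */
            return mydrv_program_route(irqd);
    }

    static const struct irq_domain_ops mydrv_domain_ops = {
            .alloc          = mydrv_domain_alloc,
            .free           = mydrv_domain_free,
            .activate       = mydrv_domain_activate,
            .deactivate     = mydrv_domain_deactivate,
    };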
diff --combined include/linux/msi.h
index eff16ef81f43b2db56b5e037e80421888d5a2ac8,cdd069cf9ed83acc413be8a534a04da08c290cf6..1f1bbb5b46794af1efc19fb73897c1b3e000bdd2
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef LINUX_MSI_H
  #define LINUX_MSI_H
  
@@@ -283,11 -284,6 +284,11 @@@ enum 
        MSI_FLAG_PCI_MSIX               = (1 << 3),
        /* Needs early activate, required for PCI */
        MSI_FLAG_ACTIVATE_EARLY         = (1 << 4),
 +      /*
 +       * Must reactivate when irq is started even when
 +       * MSI_FLAG_ACTIVATE_EARLY has been set.
 +       */
 +      MSI_FLAG_MUST_REACTIVATE        = (1 << 5),
  };
  
  int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
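MSI_FLAG_MUST_REACTIVATE complements MSI_FLAG_ACTIVATE_EARLY for reservation mode: the interrupt is activated once at allocation time to reserve resources, then activated again at request_irq() time to commit them. A hypothetical domain-info combining both (my_msi_chip is a placeholder):

    static struct msi_domain_info my_msi_domain_info = {
            .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
                     MSI_FLAG_PCI_MSIX | MSI_FLAG_ACTIVATE_EARLY |
                     MSI_FLAG_MUST_REACTIVATE,
            .chip  = &my_msi_chip,
    };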
diff --combined kernel/irq/Makefile
index 329c9193b4bfa03cce39a333c039db02a7666a18,ed15d142694b8e49e8f2c7f0d9ff3831eab054ef..ff6e352e3a6cbc7409f6170e1b0c8a66cefb3e61
@@@ -1,3 -1,4 +1,4 @@@
+ # SPDX-License-Identifier: GPL-2.0
  
  obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
  obj-$(CONFIG_IRQ_TIMINGS) += timings.o
@@@ -13,4 -14,3 +14,4 @@@ obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.
  obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
  obj-$(CONFIG_SMP) += affinity.o
  obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
 +obj-$(CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR) += matrix.o
diff --combined kernel/irq/autoprobe.c
index 6608d03efb23ff7ea231ca618b2148207ce2bf79,befa671fba644917c5b696d4ea5c5199bf6dc318..4e8089b319aedef183bef8e0131e0c5dafcb6d1b
@@@ -1,3 -1,4 +1,4 @@@
+ // SPDX-License-Identifier: GPL-2.0
  /*
   * linux/kernel/irq/autoprobe.c
   *
@@@ -53,7 -54,7 +54,7 @@@ unsigned long probe_irq_on(void
                        if (desc->irq_data.chip->irq_set_type)
                                desc->irq_data.chip->irq_set_type(&desc->irq_data,
                                                         IRQ_TYPE_PROBE);
 -                      irq_startup(desc, IRQ_NORESEND, IRQ_START_FORCE);
 +                      irq_activate_and_startup(desc, IRQ_NORESEND);
                }
                raw_spin_unlock_irq(&desc->lock);
        }
diff --combined kernel/irq/internals.h
index a0327136e469a13850553a0e26a8e35b714221f1,44ed5f8c8759051896fcdae2a38ae6eb0c4edd83..07d08ca701ec4627b558d0435c54cf6de7e147d2
@@@ -1,3 -1,4 +1,4 @@@
+ /* SPDX-License-Identifier: GPL-2.0 */
  /*
   * IRQ subsystem internal functions and variables:
   *
@@@ -74,8 -75,6 +75,8 @@@ extern void __enable_irq(struct irq_des
  #define IRQ_START_FORCE       true
  #define IRQ_START_COND        false
  
 +extern int irq_activate(struct irq_desc *desc);
 +extern void irq_activate_and_startup(struct irq_desc *desc, bool resend);
  extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
  
  extern void irq_shutdown(struct irq_desc *desc);
@@@ -438,18 -437,6 +439,18 @@@ static inline bool irq_fixup_move_pendi
  }
  #endif /* !CONFIG_GENERIC_PENDING_IRQ */
  
 +#if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
 +static inline int irq_domain_activate_irq(struct irq_data *data, bool early)
 +{
 +      irqd_set_activated(data);
 +      return 0;
 +}
 +static inline void irq_domain_deactivate_irq(struct irq_data *data)
 +{
 +      irqd_clr_activated(data);
 +}
 +#endif
 +
  #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
  #include <linux/debugfs.h>
  
@@@ -457,9 -444,7 +458,9 @@@ void irq_add_debugfs_entry(unsigned in
  static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
  {
        debugfs_remove(desc->debugfs_file);
 +      kfree(desc->dev_name);
  }
 +void irq_debugfs_copy_devname(int irq, struct device *dev);
  # ifdef CONFIG_IRQ_DOMAIN
  void irq_domain_debugfs_init(struct dentry *root);
  # else
@@@ -474,7 -459,4 +475,7 @@@ static inline void irq_add_debugfs_entr
  static inline void irq_remove_debugfs_entry(struct irq_desc *d)
  {
  }
 +static inline void irq_debugfs_copy_devname(int irq, struct device *dev)
 +{
 +}
  #endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
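irq_activate_and_startup() exists so callers like the autoprobe hunk above can force activation and startup in one step now that activation may fail. A hedged sketch of its body, assuming it matches kernel/irq/chip.c in this series:

    void irq_activate_and_startup(struct irq_desc *desc, bool resend)
    {
            if (WARN_ON(irq_activate(desc)))
                    return;
            irq_startup(desc, resend, IRQ_START_FORCE);
    }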