Merge tag 'irq-core-2020-12-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author Linus Torvalds <[email protected]>
Thu, 24 Dec 2020 21:50:23 +0000 (13:50 -0800)
committer Linus Torvalds <[email protected]>
Thu, 24 Dec 2020 21:50:23 +0000 (13:50 -0800)
Pull irq updates from Thomas Gleixner:
 "This is the second attempt after the first one failed miserably and
  got zapped to unblock the rest of the interrupt related patches.

  A treewide cleanup of interrupt descriptor (ab)use with all sorts of
  racy accesses, inefficient and dysfunctional code. The goal is to
  remove the export of irq_to_desc() to prevent these things from
  creeping up again"
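
To make the pattern concrete, here is a minimal sketch, distilled from the hunks below, of the descriptor (ab)use being removed and its replacement. The open-coded loop mirrors the count_interrupts() helper deleted from i915_pmu.c; the replacement side uses the irq_desc_kstat_cpu() helper that the arm/arm64, s390 and parisc patches switch to. Names and context are illustrative, not a verbatim patch:

#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>

/* Before: driver reaches into the descriptor via irq_to_desc() */
static u64 count_interrupts_open_coded(unsigned int irq)
{
	/* open-coded kstat_irqs(); irq_to_desc() is what this series unexports */
	struct irq_desc *desc = irq_to_desc(irq);
	u64 sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/* After: a core-provided helper keeps the descriptor internals private */
static void show_ipi_stats(struct seq_file *p, struct irq_desc *desc)
{
	int cpu;

	for_each_online_cpu(cpu)
		seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, cpu));
}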

* tag 'irq-core-2020-12-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
  genirq: Restrict export of irq_to_desc()
  xen/events: Implement irq distribution
  xen/events: Reduce irq_info::spurious_cnt storage size
  xen/events: Only force affinity mask for percpu interrupts
  xen/events: Use immediate affinity setting
  xen/events: Remove disfunct affinity spreading
  xen/events: Remove unused bind_evtchn_to_irq_lateeoi()
  net/mlx5: Use effective interrupt affinity
  net/mlx5: Replace irq_to_desc() abuse
  net/mlx4: Use effective interrupt affinity
  net/mlx4: Replace irq_to_desc() abuse
  PCI: mobiveil: Use irq_data_get_irq_chip_data()
  PCI: xilinx-nwl: Use irq_data_get_irq_chip_data()
  NTB/msi: Use irq_has_action()
  mfd: ab8500-debugfs: Remove the racy fiddling with irq_desc
  pinctrl: nomadik: Use irq_has_action()
  drm/i915/pmu: Replace open coded kstat_irqs() copy
  drm/i915/lpe_audio: Remove pointless irq_to_desc() usage
  s390/irq: Use irq_desc_kstat_cpu() in show_msi_interrupt()
  parisc/irq: Use irq_desc_kstat_cpu() in show_interrupts()
  ...
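
The i915 PMU change visible in the diff below ("drm/i915/pmu: Replace open coded kstat_irqs() copy") shows the alternative for a driver that only wants its own interrupt count: count handled interrupts locally instead of reading descriptor internals. A hedged sketch of that idea follows, with a hypothetical my_pmu container standing in for i915->pmu:

#include <linux/compiler.h>
#include <linux/interrupt.h>
#include <linux/types.h>

struct my_pmu {
	u64 irq_count;	/* written by the irq handler, read by perf */
};

static inline void my_pmu_irq_stats(struct my_pmu *pmu, irqreturn_t res)
{
	/*
	 * Only count interrupts this device actually handled, so a
	 * shared interrupt line does not inflate the statistic.
	 */
	if (unlikely(res != IRQ_HANDLED))
		return;
	/* WRITE_ONCE() prevents store tearing of the 64bit counter */
	WRITE_ONCE(pmu->irq_count, pmu->irq_count + 1);
}

static u64 my_pmu_read_irqs(struct my_pmu *pmu)
{
	/* paired READ_ONCE() on the reader side */
	return READ_ONCE(pmu->irq_count);
}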

13 files changed:
arch/arm/kernel/smp.c
arch/arm64/kernel/smp.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/i915_pmu.h
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
drivers/pci/controller/pcie-xilinx-nwl.c

diff --combined arch/arm/kernel/smp.c
index 6ab2b0ad5f400aed30d173a24c61e813f1968889,e66c46aba5b44ee4f03349d7c3d18cc0aafc937d..5c48eb4fd0e5ea7d51470bfff94c2989ed8644a9
@@@ -524,13 -524,14 +524,13 @@@ void __init smp_prepare_cpus(unsigned i
  }
  
  static const char *ipi_types[NR_IPI] __tracepoint_string = {
 -#define S(x,s)        [x] = s
 -      S(IPI_WAKEUP, "CPU wakeup interrupts"),
 -      S(IPI_TIMER, "Timer broadcast interrupts"),
 -      S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 -      S(IPI_CALL_FUNC, "Function call interrupts"),
 -      S(IPI_CPU_STOP, "CPU stop interrupts"),
 -      S(IPI_IRQ_WORK, "IRQ work interrupts"),
 -      S(IPI_COMPLETION, "completion interrupts"),
 +      [IPI_WAKEUP]            = "CPU wakeup interrupts",
 +      [IPI_TIMER]             = "Timer broadcast interrupts",
 +      [IPI_RESCHEDULE]        = "Rescheduling interrupts",
 +      [IPI_CALL_FUNC]         = "Function call interrupts",
 +      [IPI_CPU_STOP]          = "CPU stop interrupts",
 +      [IPI_IRQ_WORK]          = "IRQ work interrupts",
 +      [IPI_COMPLETION]        = "completion interrupts",
  };
  
  static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
@@@ -549,7 -550,7 +549,7 @@@ void show_ipi_list(struct seq_file *p, 
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
  
                for_each_online_cpu(cpu)
-                       seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+                       seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
  
                seq_printf(p, " %s\n", ipi_types[i]);
        }
diff --combined arch/arm64/kernel/smp.c
index 19b1705ae5cb639d5ec23584a3cd696122b81dda,b2e5dc11abedbb5862fa725342d04eb8962ac57c..6bc3a3698c3d1ea9178a239c8ad96835b0d9d1c2
@@@ -462,8 -462,6 +462,8 @@@ void __init smp_prepare_boot_cpu(void
        /* Conditionally switch to GIC PMR for interrupt masking */
        if (system_uses_irq_prio_masking())
                init_gic_priority_masking();
 +
 +      kasan_init_hw_tags();
  }
  
  static u64 __init of_get_cpu_mpidr(struct device_node *dn)
@@@ -789,13 -787,14 +789,13 @@@ void __init smp_prepare_cpus(unsigned i
  }
  
  static const char *ipi_types[NR_IPI] __tracepoint_string = {
 -#define S(x,s)        [x] = s
 -      S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 -      S(IPI_CALL_FUNC, "Function call interrupts"),
 -      S(IPI_CPU_STOP, "CPU stop interrupts"),
 -      S(IPI_CPU_CRASH_STOP, "CPU stop (for crash dump) interrupts"),
 -      S(IPI_TIMER, "Timer broadcast interrupts"),
 -      S(IPI_IRQ_WORK, "IRQ work interrupts"),
 -      S(IPI_WAKEUP, "CPU wake-up interrupts"),
 +      [IPI_RESCHEDULE]        = "Rescheduling interrupts",
 +      [IPI_CALL_FUNC]         = "Function call interrupts",
 +      [IPI_CPU_STOP]          = "CPU stop interrupts",
 +      [IPI_CPU_CRASH_STOP]    = "CPU stop (for crash dump) interrupts",
 +      [IPI_TIMER]             = "Timer broadcast interrupts",
 +      [IPI_IRQ_WORK]          = "IRQ work interrupts",
 +      [IPI_WAKEUP]            = "CPU wake-up interrupts",
  };
  
  static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
@@@ -811,7 -810,7 +811,7 @@@ int arch_show_interrupts(struct seq_fil
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
-                       seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+                       seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
                seq_printf(p, "      %s\n", ipi_types[i]);
        }
  
diff --combined drivers/gpu/drm/i915/i915_irq.c
index c80eeac539525de144b11db3c8420791127b8ee4,e741cd7f7fc66d7ea886137c66a5307e8fb1477f..6cdb052e385050c1e74f45c0a0fd066649d6a3e2
   * and related files, but that will be described in separate chapters.
   */
  
+ /*
+  * Interrupt statistic for PMU. Increments the counter only if the
+  * interrupt originated from the GPU so interrupts from a device which
+  * shares the interrupt line are not accounted.
+  */
+ static inline void pmu_irq_stats(struct drm_i915_private *i915,
+                                irqreturn_t res)
+ {
+       if (unlikely(res != IRQ_HANDLED))
+               return;
+       /*
+        * A clever compiler translates that into INC. A not so clever one
+        * should at least prevent store tearing.
+        */
+       WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
+ }
  typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
 +typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
 +                                  enum hpd_pin pin);
  
  static const u32 hpd_ilk[HPD_NUM_PINS] = {
        [HPD_PORT_A] = DE_DP_A_HOTPLUG,
@@@ -73,7 -89,7 +91,7 @@@ static const u32 hpd_ivb[HPD_NUM_PINS] 
  };
  
  static const u32 hpd_bdw[HPD_NUM_PINS] = {
 -      [HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
 +      [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
  };
  
  static const u32 hpd_ibx[HPD_NUM_PINS] = {
@@@ -128,37 -144,30 +146,37 @@@ static const u32 hpd_status_i915[HPD_NU
  };
  
  static const u32 hpd_bxt[HPD_NUM_PINS] = {
 -      [HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
 -      [HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
 -      [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
 +      [HPD_PORT_A] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_A),
 +      [HPD_PORT_B] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_B),
 +      [HPD_PORT_C] = GEN8_DE_PORT_HOTPLUG(HPD_PORT_C),
  };
  
  static const u32 hpd_gen11[HPD_NUM_PINS] = {
 -      [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(PORT_TC1) | GEN11_TBT_HOTPLUG(PORT_TC1),
 -      [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(PORT_TC2) | GEN11_TBT_HOTPLUG(PORT_TC2),
 -      [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(PORT_TC3) | GEN11_TBT_HOTPLUG(PORT_TC3),
 -      [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(PORT_TC4) | GEN11_TBT_HOTPLUG(PORT_TC4),
 -      [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(PORT_TC5) | GEN11_TBT_HOTPLUG(PORT_TC5),
 -      [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(PORT_TC6) | GEN11_TBT_HOTPLUG(PORT_TC6),
 +      [HPD_PORT_TC1] = GEN11_TC_HOTPLUG(HPD_PORT_TC1) | GEN11_TBT_HOTPLUG(HPD_PORT_TC1),
 +      [HPD_PORT_TC2] = GEN11_TC_HOTPLUG(HPD_PORT_TC2) | GEN11_TBT_HOTPLUG(HPD_PORT_TC2),
 +      [HPD_PORT_TC3] = GEN11_TC_HOTPLUG(HPD_PORT_TC3) | GEN11_TBT_HOTPLUG(HPD_PORT_TC3),
 +      [HPD_PORT_TC4] = GEN11_TC_HOTPLUG(HPD_PORT_TC4) | GEN11_TBT_HOTPLUG(HPD_PORT_TC4),
 +      [HPD_PORT_TC5] = GEN11_TC_HOTPLUG(HPD_PORT_TC5) | GEN11_TBT_HOTPLUG(HPD_PORT_TC5),
 +      [HPD_PORT_TC6] = GEN11_TC_HOTPLUG(HPD_PORT_TC6) | GEN11_TBT_HOTPLUG(HPD_PORT_TC6),
  };
  
  static const u32 hpd_icp[HPD_NUM_PINS] = {
 -      [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A),
 -      [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B),
 -      [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C),
 -      [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(PORT_TC1),
 -      [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(PORT_TC2),
 -      [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(PORT_TC3),
 -      [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(PORT_TC4),
 -      [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(PORT_TC5),
 -      [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
 +      [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
 +      [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
 +      [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
 +      [HPD_PORT_TC1] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1),
 +      [HPD_PORT_TC2] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2),
 +      [HPD_PORT_TC3] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC3),
 +      [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4),
 +      [HPD_PORT_TC5] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC5),
 +      [HPD_PORT_TC6] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC6),
 +};
 +
 +static const u32 hpd_sde_dg1[HPD_NUM_PINS] = {
 +      [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_A),
 +      [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_B),
 +      [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_C),
 +      [HPD_PORT_D] = SDE_DDI_HOTPLUG_ICP(HPD_PORT_D),
  };
  
  static void intel_hpd_init_pins(struct drm_i915_private *dev_priv)
        else
                hpd->hpd = hpd_ilk;
  
 -      if (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv))
 +      if ((INTEL_PCH_TYPE(dev_priv) < PCH_DG1) &&
 +          (!HAS_PCH_SPLIT(dev_priv) || HAS_PCH_NOP(dev_priv)))
                return;
  
 -      if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
 -          HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
 +      if (HAS_PCH_DG1(dev_priv))
 +              hpd->pch_hpd = hpd_sde_dg1;
 +      else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_JSP(dev_priv) ||
 +               HAS_PCH_ICP(dev_priv) || HAS_PCH_MCC(dev_priv))
                hpd->pch_hpd = hpd_icp;
        else if (HAS_PCH_CNP(dev_priv) || HAS_PCH_SPT(dev_priv))
                hpd->pch_hpd = hpd_spt;
@@@ -349,14 -355,17 +367,14 @@@ void ilk_update_display_irq(struct drm_
        u32 new_val;
  
        lockdep_assert_held(&dev_priv->irq_lock);
 -
        drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
  
 -      if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
 -              return;
 -
        new_val = dev_priv->irq_mask;
        new_val &= ~interrupt_mask;
        new_val |= (~enabled_irq_mask & interrupt_mask);
  
 -      if (new_val != dev_priv->irq_mask) {
 +      if (new_val != dev_priv->irq_mask &&
 +          !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) {
                dev_priv->irq_mask = new_val;
                I915_WRITE(DEIMR, dev_priv->irq_mask);
                POSTING_READ(DEIMR);
@@@ -691,12 -700,8 +709,12 @@@ u32 i915_get_vblank_counter(struct drm_
  u32 g4x_get_vblank_counter(struct drm_crtc *crtc)
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 +      struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[drm_crtc_index(crtc)];
        enum pipe pipe = to_intel_crtc(crtc)->pipe;
  
 +      if (!vblank->max_vblank_count)
 +              return 0;
 +
        return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
  }
  
@@@ -1044,17 -1049,17 +1062,17 @@@ static bool gen11_port_hotplug_long_det
  {
        switch (pin) {
        case HPD_PORT_TC1:
 -              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC1);
 +              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1);
        case HPD_PORT_TC2:
 -              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC2);
 +              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2);
        case HPD_PORT_TC3:
 -              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC3);
 +              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3);
        case HPD_PORT_TC4:
 -              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC4);
 +              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4);
        case HPD_PORT_TC5:
 -              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC5);
 +              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5);
        case HPD_PORT_TC6:
 -              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(PORT_TC6);
 +              return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6);
        default:
                return false;
        }
@@@ -1078,13 -1083,11 +1096,13 @@@ static bool icp_ddi_port_hotplug_long_d
  {
        switch (pin) {
        case HPD_PORT_A:
 -              return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A);
 +              return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A);
        case HPD_PORT_B:
 -              return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B);
 +              return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B);
        case HPD_PORT_C:
 -              return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C);
 +              return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C);
 +      case HPD_PORT_D:
 +              return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D);
        default:
                return false;
        }
@@@ -1094,17 -1097,17 +1112,17 @@@ static bool icp_tc_port_hotplug_long_de
  {
        switch (pin) {
        case HPD_PORT_TC1:
 -              return val & ICP_TC_HPD_LONG_DETECT(PORT_TC1);
 +              return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1);
        case HPD_PORT_TC2:
 -              return val & ICP_TC_HPD_LONG_DETECT(PORT_TC2);
 +              return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2);
        case HPD_PORT_TC3:
 -              return val & ICP_TC_HPD_LONG_DETECT(PORT_TC3);
 +              return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3);
        case HPD_PORT_TC4:
 -              return val & ICP_TC_HPD_LONG_DETECT(PORT_TC4);
 +              return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4);
        case HPD_PORT_TC5:
 -              return val & ICP_TC_HPD_LONG_DETECT(PORT_TC5);
 +              return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5);
        case HPD_PORT_TC6:
 -              return val & ICP_TC_HPD_LONG_DETECT(PORT_TC6);
 +              return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6);
        default:
                return false;
        }
@@@ -1207,43 -1210,6 +1225,43 @@@ static void intel_get_hpd_pins(struct d
  
  }
  
 +static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
 +                                const u32 hpd[HPD_NUM_PINS])
 +{
 +      struct intel_encoder *encoder;
 +      u32 enabled_irqs = 0;
 +
 +      for_each_intel_encoder(&dev_priv->drm, encoder)
 +              if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
 +                      enabled_irqs |= hpd[encoder->hpd_pin];
 +
 +      return enabled_irqs;
 +}
 +
 +static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
 +                                const u32 hpd[HPD_NUM_PINS])
 +{
 +      struct intel_encoder *encoder;
 +      u32 hotplug_irqs = 0;
 +
 +      for_each_intel_encoder(&dev_priv->drm, encoder)
 +              hotplug_irqs |= hpd[encoder->hpd_pin];
 +
 +      return hotplug_irqs;
 +}
 +
 +static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915,
 +                                   hotplug_enables_func hotplug_enables)
 +{
 +      struct intel_encoder *encoder;
 +      u32 hotplug = 0;
 +
 +      for_each_intel_encoder(&i915->drm, encoder)
 +              hotplug |= hotplug_enables(i915, encoder->hpd_pin);
 +
 +      return hotplug;
 +}
 +
  static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
  {
        wake_up_all(&dev_priv->gmbus_wait_queue);
@@@ -1297,23 -1263,6 +1315,23 @@@ display_pipe_crc_irq_handler(struct drm
                             u32 crc4) {}
  #endif
  
 +static void flip_done_handler(struct drm_i915_private *i915,
 +                            enum pipe pipe)
 +{
 +      struct intel_crtc *crtc = intel_get_crtc_for_pipe(i915, pipe);
 +      struct drm_crtc_state *crtc_state = crtc->base.state;
 +      struct drm_pending_vblank_event *e = crtc_state->event;
 +      struct drm_device *dev = &i915->drm;
 +      unsigned long irqflags;
 +
 +      spin_lock_irqsave(&dev->event_lock, irqflags);
 +
 +      crtc_state->event = NULL;
 +
 +      drm_crtc_send_vblank_event(&crtc->base, e);
 +
 +      spin_unlock_irqrestore(&dev->event_lock, irqflags);
 +}
  
  static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
                                     enum pipe pipe)
@@@ -1668,6 -1617,8 +1686,8 @@@ static irqreturn_t valleyview_irq_handl
                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);
  
+       pmu_irq_stats(dev_priv, ret);
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  
        return ret;
@@@ -1745,6 -1696,8 +1765,8 @@@ static irqreturn_t cherryview_irq_handl
                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);
  
+       pmu_irq_stats(dev_priv, ret);
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  
        return ret;
@@@ -1909,10 -1862,27 +1931,10 @@@ static void cpt_irq_handler(struct drm_
  
  static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
  {
 -      u32 ddi_hotplug_trigger, tc_hotplug_trigger;
 +      u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP;
 +      u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP;
        u32 pin_mask = 0, long_mask = 0;
  
 -      if (HAS_PCH_TGP(dev_priv)) {
 -              ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
 -              tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP;
 -      } else if (HAS_PCH_JSP(dev_priv)) {
 -              ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP;
 -              tc_hotplug_trigger = 0;
 -      } else if (HAS_PCH_MCC(dev_priv)) {
 -              ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
 -              tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1);
 -      } else {
 -              drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
 -                       "Unrecognized PCH type 0x%x\n",
 -                       INTEL_PCH_TYPE(dev_priv));
 -
 -              ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
 -              tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
 -      }
 -
        if (ddi_hotplug_trigger) {
                u32 dig_hotplug_reg;
  
@@@ -2155,6 -2125,8 +2177,8 @@@ static irqreturn_t ilk_irq_handler(int 
        if (sde_ier)
                raw_reg_write(regs, SDEIER, sde_ier);
  
+       pmu_irq_stats(i915, ret);
        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        enable_rpm_wakeref_asserts(&i915->runtime_pm);
  
@@@ -2289,63 -2261,6 +2313,63 @@@ gen8_de_misc_irq_handler(struct drm_i91
                drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
  }
  
 +static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
 +                                         u32 te_trigger)
 +{
 +      enum pipe pipe = INVALID_PIPE;
 +      enum transcoder dsi_trans;
 +      enum port port;
 +      u32 val, tmp;
 +
 +      /*
 +       * In case of dual link, TE comes from DSI_1
 +       * this is to check if dual link is enabled
 +       */
 +      val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0));
 +      val &= PORT_SYNC_MODE_ENABLE;
 +
 +      /*
 +       * if dual link is enabled, then read DSI_0
 +       * transcoder registers
 +       */
 +      port = ((te_trigger & DSI1_TE && val) || (te_trigger & DSI0_TE)) ?
 +                                                PORT_A : PORT_B;
 +      dsi_trans = (port == PORT_A) ? TRANSCODER_DSI_0 : TRANSCODER_DSI_1;
 +
 +      /* Check if DSI configured in command mode */
 +      val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
 +      val = val & OP_MODE_MASK;
 +
 +      if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) {
 +              drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n");
 +              return;
 +      }
 +
 +      /* Get PIPE for handling VBLANK event */
 +      val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
 +      switch (val & TRANS_DDI_EDP_INPUT_MASK) {
 +      case TRANS_DDI_EDP_INPUT_A_ON:
 +              pipe = PIPE_A;
 +              break;
 +      case TRANS_DDI_EDP_INPUT_B_ONOFF:
 +              pipe = PIPE_B;
 +              break;
 +      case TRANS_DDI_EDP_INPUT_C_ONOFF:
 +              pipe = PIPE_C;
 +              break;
 +      default:
 +              drm_err(&dev_priv->drm, "Invalid PIPE\n");
 +              return;
 +      }
 +
 +      intel_handle_vblank(dev_priv, pipe);
 +
 +      /* clear TE in dsi IIR */
 +      port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A;
 +      tmp = I915_READ(DSI_INTR_IDENT_REG(port));
 +      I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
 +}
 +
  static irqreturn_t
  gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
  {
        if (master_ctl & GEN8_DE_PORT_IRQ) {
                iir = I915_READ(GEN8_DE_PORT_IIR);
                if (iir) {
 -                      u32 tmp_mask;
                        bool found = false;
  
                        I915_WRITE(GEN8_DE_PORT_IIR, iir);
                        }
  
                        if (IS_GEN9_LP(dev_priv)) {
 -                              tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
 -                              if (tmp_mask) {
 -                                      bxt_hpd_irq_handler(dev_priv, tmp_mask);
 +                              u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK;
 +
 +                              if (hotplug_trigger) {
 +                                      bxt_hpd_irq_handler(dev_priv, hotplug_trigger);
                                        found = true;
                                }
                        } else if (IS_BROADWELL(dev_priv)) {
 -                              tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
 -                              if (tmp_mask) {
 -                                      ilk_hpd_irq_handler(dev_priv, tmp_mask);
 +                              u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK;
 +
 +                              if (hotplug_trigger) {
 +                                      ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
                                        found = true;
                                }
                        }
                                found = true;
                        }
  
 +                      if (INTEL_GEN(dev_priv) >= 11) {
 +                              u32 te_trigger = iir & (DSI0_TE | DSI1_TE);
 +
 +                              if (te_trigger) {
 +                                      gen11_dsi_te_interrupt_handler(dev_priv, te_trigger);
 +                                      found = true;
 +                              }
 +                      }
 +
                        if (!found)
                                drm_err(&dev_priv->drm,
                                        "Unexpected DE Port interrupt\n");
                if (iir & GEN8_PIPE_VBLANK)
                        intel_handle_vblank(dev_priv, pipe);
  
 +              if (iir & GEN9_PIPE_PLANE1_FLIP_DONE)
 +                      flip_done_handler(dev_priv, pipe);
 +
                if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
                        hsw_pipe_crc_irq_handler(dev_priv, pipe);
  
@@@ -2541,6 -2443,8 +2565,8 @@@ static irqreturn_t gen8_irq_handler(in
  
        gen8_master_intr_enable(regs);
  
+       pmu_irq_stats(dev_priv, IRQ_HANDLED);
        return IRQ_HANDLED;
  }
  
@@@ -2636,6 -2540,8 +2662,8 @@@ __gen11_irq_handler(struct drm_i915_pri
  
        gen11_gu_misc_irq_handler(gt, gu_misc_iir);
  
+       pmu_irq_stats(i915, IRQ_HANDLED);
        return IRQ_HANDLED;
  }
  
@@@ -2753,47 -2659,12 +2781,47 @@@ int ilk_enable_vblank(struct drm_crtc *
        return 0;
  }
  
 +static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc,
 +                                 bool enable)
 +{
 +      struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
 +      enum port port;
 +      u32 tmp;
 +
 +      if (!(intel_crtc->mode_flags &
 +          (I915_MODE_FLAG_DSI_USE_TE1 | I915_MODE_FLAG_DSI_USE_TE0)))
 +              return false;
 +
 +      /* for dual link cases we consider TE from slave */
 +      if (intel_crtc->mode_flags & I915_MODE_FLAG_DSI_USE_TE1)
 +              port = PORT_B;
 +      else
 +              port = PORT_A;
 +
 +      tmp =  I915_READ(DSI_INTR_MASK_REG(port));
 +      if (enable)
 +              tmp &= ~DSI_TE_EVENT;
 +      else
 +              tmp |= DSI_TE_EVENT;
 +
 +      I915_WRITE(DSI_INTR_MASK_REG(port), tmp);
 +
 +      tmp = I915_READ(DSI_INTR_IDENT_REG(port));
 +      I915_WRITE(DSI_INTR_IDENT_REG(port), tmp);
 +
 +      return true;
 +}
 +
  int bdw_enable_vblank(struct drm_crtc *crtc)
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 -      enum pipe pipe = to_intel_crtc(crtc)->pipe;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      enum pipe pipe = intel_crtc->pipe;
        unsigned long irqflags;
  
 +      if (gen11_dsi_configure_te(intel_crtc, true))
 +              return 0;
 +
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        return 0;
  }
  
 +void skl_enable_flip_done(struct intel_crtc *crtc)
 +{
 +      struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
 +      unsigned long irqflags;
 +
 +      spin_lock_irqsave(&i915->irq_lock, irqflags);
 +
 +      bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
 +
 +      spin_unlock_irqrestore(&i915->irq_lock, irqflags);
 +}
 +
  /* Called from drm generic code, passed 'crtc' which
   * we use as a pipe index
   */
@@@ -2872,31 -2730,14 +2900,31 @@@ void ilk_disable_vblank(struct drm_crt
  void bdw_disable_vblank(struct drm_crtc *crtc)
  {
        struct drm_i915_private *dev_priv = to_i915(crtc->dev);
 -      enum pipe pipe = to_intel_crtc(crtc)->pipe;
 +      struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 +      enum pipe pipe = intel_crtc->pipe;
        unsigned long irqflags;
  
 +      if (gen11_dsi_configure_te(intel_crtc, false))
 +              return;
 +
        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
        bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
  }
  
 +void skl_disable_flip_done(struct intel_crtc *crtc)
 +{
 +      struct drm_i915_private *i915 = to_i915(crtc->base.dev);
 +      enum pipe pipe = crtc->pipe;
 +      unsigned long irqflags;
 +
 +      spin_lock_irqsave(&i915->irq_lock, irqflags);
 +
 +      bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE1_FLIP_DONE);
 +
 +      spin_unlock_irqrestore(&i915->irq_lock, irqflags);
 +}
 +
  static void ibx_irq_reset(struct drm_i915_private *dev_priv)
  {
        struct intel_uncore *uncore = &dev_priv->uncore;
                I915_WRITE(SERR_INT, 0xffffffff);
  }
  
 -/*
 - * SDEIER is also touched by the interrupt handler to work around missed PCH
 - * interrupts. Hence we can't update it after the interrupt handler is enabled -
 - * instead we unconditionally enable all PCH interrupt sources here, but then
 - * only unmask them as needed with SDEIMR.
 - *
 - * This function needs to be called before interrupts are enabled.
 - */
 -static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
 -{
 -      if (HAS_PCH_NOP(dev_priv))
 -              return;
 -
 -      drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
 -      I915_WRITE(SDEIER, 0xffffffff);
 -      POSTING_READ(SDEIER);
 -}
 -
  static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
  {
        struct intel_uncore *uncore = &dev_priv->uncore;
@@@ -2966,8 -2825,6 +2994,8 @@@ static void ilk_irq_reset(struct drm_i9
        struct intel_uncore *uncore = &dev_priv->uncore;
  
        GEN3_IRQ_RESET(uncore, DE);
 +      dev_priv->irq_mask = ~0u;
 +
        if (IS_GEN(dev_priv, 7))
                intel_uncore_write(uncore, GEN7_ERR_INT, 0xffffffff);
  
@@@ -3058,10 -2915,8 +3086,10 @@@ static void gen11_display_irq_reset(str
        if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
                GEN3_IRQ_RESET(uncore, SDE);
  
 -      /* Wa_14010685332:icl,jsl,ehl,tgl,rkl */
 -      if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) {
 +      /* Wa_14010685332:cnp/cmp,tgp,adp */
 +      if (INTEL_PCH_TYPE(dev_priv) == PCH_CNP ||
 +          (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP &&
 +           INTEL_PCH_TYPE(dev_priv) < PCH_DG1)) {
                intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
                                 SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
                intel_uncore_rmw(uncore, SOUTH_CHICKEN1,
@@@ -3093,9 -2948,6 +3121,9 @@@ void gen8_irq_power_well_post_enable(st
        u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
        enum pipe pipe;
  
 +      if (INTEL_GEN(dev_priv) >= 9)
 +              extra_ier |= GEN9_PIPE_PLANE1_FLIP_DONE;
 +
        spin_lock_irq(&dev_priv->irq_lock);
  
        if (!intel_irqs_enabled(dev_priv)) {
@@@ -3150,29 -3002,29 +3178,29 @@@ static void cherryview_irq_reset(struc
        spin_unlock_irq(&dev_priv->irq_lock);
  }
  
 -static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
 -                                const u32 hpd[HPD_NUM_PINS])
 -{
 -      struct intel_encoder *encoder;
 -      u32 enabled_irqs = 0;
 -
 -      for_each_intel_encoder(&dev_priv->drm, encoder)
 -              if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
 -                      enabled_irqs |= hpd[encoder->hpd_pin];
 -
 -      return enabled_irqs;
 -}
 -
 -static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv,
 -                                const u32 hpd[HPD_NUM_PINS])
 +static u32 ibx_hotplug_enables(struct drm_i915_private *i915,
 +                             enum hpd_pin pin)
  {
 -      struct intel_encoder *encoder;
 -      u32 hotplug_irqs = 0;
 -
 -      for_each_intel_encoder(&dev_priv->drm, encoder)
 -              hotplug_irqs |= hpd[encoder->hpd_pin];
 -
 -      return hotplug_irqs;
 +      switch (pin) {
 +      case HPD_PORT_A:
 +              /*
 +               * When CPU and PCH are on the same package, port A
 +               * HPD must be enabled in both north and south.
 +               */
 +              return HAS_PCH_LPT_LP(i915) ?
 +                      PORTA_HOTPLUG_ENABLE : 0;
 +      case HPD_PORT_B:
 +              return PORTB_HOTPLUG_ENABLE |
 +                      PORTB_PULSE_DURATION_2ms;
 +      case HPD_PORT_C:
 +              return PORTC_HOTPLUG_ENABLE |
 +                      PORTC_PULSE_DURATION_2ms;
 +      case HPD_PORT_D:
 +              return PORTD_HOTPLUG_ENABLE |
 +                      PORTD_PULSE_DURATION_2ms;
 +      default:
 +              return 0;
 +      }
  }
  
  static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv)
         * The pulse duration bits are reserved on LPT+.
         */
        hotplug = I915_READ(PCH_PORT_HOTPLUG);
 -      hotplug &= ~(PORTB_PULSE_DURATION_MASK |
 +      hotplug &= ~(PORTA_HOTPLUG_ENABLE |
 +                   PORTB_HOTPLUG_ENABLE |
 +                   PORTC_HOTPLUG_ENABLE |
 +                   PORTD_HOTPLUG_ENABLE |
 +                   PORTB_PULSE_DURATION_MASK |
                     PORTC_PULSE_DURATION_MASK |
                     PORTD_PULSE_DURATION_MASK);
 -      hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
 -      hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
 -      hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
 -      /*
 -       * When CPU and PCH are on the same package, port A
 -       * HPD must be enabled in both north and south.
 -       */
 -      if (HAS_PCH_LPT_LP(dev_priv))
 -              hotplug |= PORTA_HOTPLUG_ENABLE;
 +      hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables);
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  }
  
@@@ -3208,65 -3064,28 +3236,65 @@@ static void ibx_hpd_irq_setup(struct dr
        ibx_hpd_detection_setup(dev_priv);
  }
  
 -static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv,
 -                                      u32 enable_mask)
 +static u32 icp_ddi_hotplug_enables(struct drm_i915_private *i915,
 +                                 enum hpd_pin pin)
 +{
 +      switch (pin) {
 +      case HPD_PORT_A:
 +      case HPD_PORT_B:
 +      case HPD_PORT_C:
 +      case HPD_PORT_D:
 +              return SHOTPLUG_CTL_DDI_HPD_ENABLE(pin);
 +      default:
 +              return 0;
 +      }
 +}
 +
 +static u32 icp_tc_hotplug_enables(struct drm_i915_private *i915,
 +                                enum hpd_pin pin)
 +{
 +      switch (pin) {
 +      case HPD_PORT_TC1:
 +      case HPD_PORT_TC2:
 +      case HPD_PORT_TC3:
 +      case HPD_PORT_TC4:
 +      case HPD_PORT_TC5:
 +      case HPD_PORT_TC6:
 +              return ICP_TC_HPD_ENABLE(pin);
 +      default:
 +              return 0;
 +      }
 +}
 +
 +static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv)
  {
        u32 hotplug;
  
        hotplug = I915_READ(SHOTPLUG_CTL_DDI);
 -      hotplug |= enable_mask;
 +      hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) |
 +                   SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) |
 +                   SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) |
 +                   SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D));
 +      hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables);
        I915_WRITE(SHOTPLUG_CTL_DDI, hotplug);
  }
  
 -static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv,
 -                                     u32 enable_mask)
 +static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
  {
        u32 hotplug;
  
        hotplug = I915_READ(SHOTPLUG_CTL_TC);
 -      hotplug |= enable_mask;
 +      hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) |
 +                   ICP_TC_HPD_ENABLE(HPD_PORT_TC2) |
 +                   ICP_TC_HPD_ENABLE(HPD_PORT_TC3) |
 +                   ICP_TC_HPD_ENABLE(HPD_PORT_TC4) |
 +                   ICP_TC_HPD_ENABLE(HPD_PORT_TC5) |
 +                   ICP_TC_HPD_ENABLE(HPD_PORT_TC6));
 +      hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables);
        I915_WRITE(SHOTPLUG_CTL_TC, hotplug);
  }
  
 -static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv,
 -                            u32 ddi_enable_mask, u32 tc_enable_mask)
 +static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv)
  {
        u32 hotplug_irqs, enabled_irqs;
  
  
        ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
  
 -      icp_ddi_hpd_detection_setup(dev_priv, ddi_enable_mask);
 -      if (tc_enable_mask)
 -              icp_tc_hpd_detection_setup(dev_priv, tc_enable_mask);
 +      icp_ddi_hpd_detection_setup(dev_priv);
 +      icp_tc_hpd_detection_setup(dev_priv);
  }
  
 -/*
 - * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the
 - * equivalent of SDE.
 - */
 -static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv)
 +static u32 gen11_hotplug_enables(struct drm_i915_private *i915,
 +                               enum hpd_pin pin)
  {
 -      icp_hpd_irq_setup(dev_priv,
 -                        ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1));
 +      switch (pin) {
 +      case HPD_PORT_TC1:
 +      case HPD_PORT_TC2:
 +      case HPD_PORT_TC3:
 +      case HPD_PORT_TC4:
 +      case HPD_PORT_TC5:
 +      case HPD_PORT_TC6:
 +              return GEN11_HOTPLUG_CTL_ENABLE(pin);
 +      default:
 +              return 0;
 +      }
  }
  
 -/*
 - * JSP behaves exactly the same as MCC above except that port C is mapped to
 - * the DDI-C pins instead of the TC1 pins.  This means we should follow TGP's
 - * masks & tables rather than ICP's masks & tables.
 - */
 -static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv)
 +static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv)
  {
 -      icp_hpd_irq_setup(dev_priv,
 -                        TGP_DDI_HPD_ENABLE_MASK, 0);
 +      u32 val;
 +
 +      val = I915_READ(SOUTH_CHICKEN1);
 +      val |= (INVERT_DDIA_HPD |
 +              INVERT_DDIB_HPD |
 +              INVERT_DDIC_HPD |
 +              INVERT_DDID_HPD);
 +      I915_WRITE(SOUTH_CHICKEN1, val);
 +
 +      icp_hpd_irq_setup(dev_priv);
  }
  
 -static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv)
 +static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv)
  {
        u32 hotplug;
  
        hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL);
 -      hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
 +      hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
 +      hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
        I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug);
 +}
 +
 +static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv)
 +{
 +      u32 hotplug;
  
        hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL);
 -      hotplug |= GEN11_HOTPLUG_CTL_ENABLE(PORT_TC1) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC2) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC3) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC4) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC5) |
 -                 GEN11_HOTPLUG_CTL_ENABLE(PORT_TC6);
 +      hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC4) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) |
 +                   GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6));
 +      hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables);
        I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug);
  }
  
@@@ -3356,39 -3160,14 +3384,39 @@@ static void gen11_hpd_irq_setup(struct 
        I915_WRITE(GEN11_DE_HPD_IMR, val);
        POSTING_READ(GEN11_DE_HPD_IMR);
  
 -      gen11_hpd_detection_setup(dev_priv);
 +      gen11_tc_hpd_detection_setup(dev_priv);
 +      gen11_tbt_hpd_detection_setup(dev_priv);
  
 -      if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP)
 -              icp_hpd_irq_setup(dev_priv,
 -                                TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK);
 -      else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 -              icp_hpd_irq_setup(dev_priv,
 -                                ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK);
 +      if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
 +              icp_hpd_irq_setup(dev_priv);
 +}
 +
 +static u32 spt_hotplug_enables(struct drm_i915_private *i915,
 +                             enum hpd_pin pin)
 +{
 +      switch (pin) {
 +      case HPD_PORT_A:
 +              return PORTA_HOTPLUG_ENABLE;
 +      case HPD_PORT_B:
 +              return PORTB_HOTPLUG_ENABLE;
 +      case HPD_PORT_C:
 +              return PORTC_HOTPLUG_ENABLE;
 +      case HPD_PORT_D:
 +              return PORTD_HOTPLUG_ENABLE;
 +      default:
 +              return 0;
 +      }
 +}
 +
 +static u32 spt_hotplug2_enables(struct drm_i915_private *i915,
 +                              enum hpd_pin pin)
 +{
 +      switch (pin) {
 +      case HPD_PORT_E:
 +              return PORTE_HOTPLUG_ENABLE;
 +      default:
 +              return 0;
 +      }
  }
  
  static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv)
  
        /* Enable digital hotplug on the PCH */
        hotplug = I915_READ(PCH_PORT_HOTPLUG);
 -      hotplug |= PORTA_HOTPLUG_ENABLE |
 -                 PORTB_HOTPLUG_ENABLE |
 -                 PORTC_HOTPLUG_ENABLE |
 -                 PORTD_HOTPLUG_ENABLE;
 +      hotplug &= ~(PORTA_HOTPLUG_ENABLE |
 +                   PORTB_HOTPLUG_ENABLE |
 +                   PORTC_HOTPLUG_ENABLE |
 +                   PORTD_HOTPLUG_ENABLE);
 +      hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables);
        I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  
        hotplug = I915_READ(PCH_PORT_HOTPLUG2);
 -      hotplug |= PORTE_HOTPLUG_ENABLE;
 +      hotplug &= ~PORTE_HOTPLUG_ENABLE;
 +      hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables);
        I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
  }
  
@@@ -3433,18 -3210,6 +3461,18 @@@ static void spt_hpd_irq_setup(struct dr
        spt_hpd_detection_setup(dev_priv);
  }
  
 +static u32 ilk_hotplug_enables(struct drm_i915_private *i915,
 +                             enum hpd_pin pin)
 +{
 +      switch (pin) {
 +      case HPD_PORT_A:
 +              return DIGITAL_PORTA_HOTPLUG_ENABLE |
 +                      DIGITAL_PORTA_PULSE_DURATION_2ms;
 +      default:
 +              return 0;
 +      }
 +}
 +
  static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv)
  {
        u32 hotplug;
         * The pulse duration bits are reserved on HSW+.
         */
        hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
 -      hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
 -      hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE |
 -                 DIGITAL_PORTA_PULSE_DURATION_2ms;
 +      hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE |
 +                   DIGITAL_PORTA_PULSE_DURATION_MASK);
 +      hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables);
        I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
  }
  
@@@ -3478,45 -3243,41 +3506,45 @@@ static void ilk_hpd_irq_setup(struct dr
        ibx_hpd_irq_setup(dev_priv);
  }
  
 -static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
 -                                    u32 enabled_irqs)
 +static u32 bxt_hotplug_enables(struct drm_i915_private *i915,
 +                             enum hpd_pin pin)
  {
        u32 hotplug;
  
 -      hotplug = I915_READ(PCH_PORT_HOTPLUG);
 -      hotplug |= PORTA_HOTPLUG_ENABLE |
 -                 PORTB_HOTPLUG_ENABLE |
 -                 PORTC_HOTPLUG_ENABLE;
 -
 -      drm_dbg_kms(&dev_priv->drm,
 -                  "Invert bit setting: hp_ctl:%x hp_port:%x\n",
 -                  hotplug, enabled_irqs);
 -      hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
 -
 -      /*
 -       * For BXT invert bit has to be set based on AOB design
 -       * for HPD detection logic, update it based on VBT fields.
 -       */
 -      if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
 -          intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
 -              hotplug |= BXT_DDIA_HPD_INVERT;
 -      if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
 -          intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
 -              hotplug |= BXT_DDIB_HPD_INVERT;
 -      if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
 -          intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
 -              hotplug |= BXT_DDIC_HPD_INVERT;
 -
 -      I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
 +      switch (pin) {
 +      case HPD_PORT_A:
 +              hotplug = PORTA_HOTPLUG_ENABLE;
 +              if (intel_bios_is_port_hpd_inverted(i915, PORT_A))
 +                      hotplug |= BXT_DDIA_HPD_INVERT;
 +              return hotplug;
 +      case HPD_PORT_B:
 +              hotplug = PORTB_HOTPLUG_ENABLE;
 +              if (intel_bios_is_port_hpd_inverted(i915, PORT_B))
 +                      hotplug |= BXT_DDIB_HPD_INVERT;
 +              return hotplug;
 +      case HPD_PORT_C:
 +              hotplug = PORTC_HOTPLUG_ENABLE;
 +              if (intel_bios_is_port_hpd_inverted(i915, PORT_C))
 +                      hotplug |= BXT_DDIC_HPD_INVERT;
 +              return hotplug;
 +      default:
 +              return 0;
 +      }
  }
  
  static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv)
  {
 -      __bxt_hpd_detection_setup(dev_priv, BXT_DE_PORT_HOTPLUG_MASK);
 +      u32 hotplug;
 +
 +      hotplug = I915_READ(PCH_PORT_HOTPLUG);
 +      hotplug &= ~(PORTA_HOTPLUG_ENABLE |
 +                   PORTB_HOTPLUG_ENABLE |
 +                   PORTC_HOTPLUG_ENABLE |
 +                   BXT_DDIA_HPD_INVERT |
 +                   BXT_DDIB_HPD_INVERT |
 +                   BXT_DDIC_HPD_INVERT);
 +      hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables);
 +      I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
  }
  
  static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
  
        bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
  
 -      __bxt_hpd_detection_setup(dev_priv, enabled_irqs);
 +      bxt_hpd_detection_setup(dev_priv);
  }
  
 +/*
 + * SDEIER is also touched by the interrupt handler to work around missed PCH
 + * interrupts. Hence we can't update it after the interrupt handler is enabled -
 + * instead we unconditionally enable all PCH interrupt sources here, but then
 + * only unmask them as needed with SDEIMR.
 + *
 + * Note that we currently do this after installing the interrupt handler,
 + * but before we enable the master interrupt. That should be sufficient
 + * to avoid races with the irq handler, assuming we have MSI. Shared legacy
 + * interrupts could still race.
 + */
  static void ibx_irq_postinstall(struct drm_i915_private *dev_priv)
  {
 +      struct intel_uncore *uncore = &dev_priv->uncore;
        u32 mask;
  
        if (HAS_PCH_NOP(dev_priv))
        else
                mask = SDE_GMBUS_CPT;
  
 -      gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
 -      I915_WRITE(SDEIMR, ~mask);
 -
 -      if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
 -          HAS_PCH_LPT(dev_priv))
 -              ibx_hpd_detection_setup(dev_priv);
 -      else
 -              spt_hpd_detection_setup(dev_priv);
 +      GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
  }
  
  static void ilk_irq_postinstall(struct drm_i915_private *dev_priv)
                display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
                                DE_AUX_CHANNEL_A | DE_PIPEB_CRC_DONE |
                                DE_PIPEA_CRC_DONE | DE_POISON);
 -              extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
 +              extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK |
                              DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
                              DE_DP_A_HOTPLUG);
        }
                display_mask |= DE_EDP_PSR_INT_HSW;
        }
  
 -      dev_priv->irq_mask = ~display_mask;
 +      if (IS_IRONLAKE_M(dev_priv))
 +              extra_mask |= DE_PCU_EVENT;
  
 -      ibx_irq_pre_postinstall(dev_priv);
 +      dev_priv->irq_mask = ~display_mask;
  
 -      GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
 -                    display_mask | extra_mask);
 +      ibx_irq_postinstall(dev_priv);
  
        gen5_gt_irq_postinstall(&dev_priv->gt);
  
 -      ilk_hpd_detection_setup(dev_priv);
 -
 -      ibx_irq_postinstall(dev_priv);
 -
 -      if (IS_IRONLAKE_M(dev_priv)) {
 -              /* Enable PCU event interrupts
 -               *
 -               * spinlocking not required here for correctness since interrupt
 -               * setup is guaranteed to run in single-threaded context. But we
 -               * need it to make the assert_spin_locked happy. */
 -              spin_lock_irq(&dev_priv->irq_lock);
 -              ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
 -              spin_unlock_irq(&dev_priv->irq_lock);
 -      }
 +      GEN3_IRQ_INIT(uncore, DE, dev_priv->irq_mask,
 +                    display_mask | extra_mask);
  }
  
  void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
@@@ -3660,24 -3428,14 +3688,24 @@@ static void gen8_de_irq_postinstall(str
        if (IS_GEN9_LP(dev_priv))
                de_port_masked |= BXT_DE_PORT_GMBUS;
  
 +      if (INTEL_GEN(dev_priv) >= 11) {
 +              enum port port;
 +
 +              if (intel_bios_is_dsi_present(dev_priv, &port))
 +                      de_port_masked |= DSI0_TE | DSI1_TE;
 +      }
 +
        de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
                                           GEN8_PIPE_FIFO_UNDERRUN;
  
 +      if (INTEL_GEN(dev_priv) >= 9)
 +              de_pipe_enables |= GEN9_PIPE_PLANE1_FLIP_DONE;
 +
        de_port_enables = de_port_masked;
        if (IS_GEN9_LP(dev_priv))
                de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
        else if (IS_BROADWELL(dev_priv))
 -              de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
 +              de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK;
  
        if (INTEL_GEN(dev_priv) >= 12) {
                enum transcoder trans;
  
                GEN3_IRQ_INIT(uncore, GEN11_DE_HPD_, ~de_hpd_masked,
                              de_hpd_enables);
 -              gen11_hpd_detection_setup(dev_priv);
 -      } else if (IS_GEN9_LP(dev_priv)) {
 -              bxt_hpd_detection_setup(dev_priv);
 -      } else if (IS_BROADWELL(dev_priv)) {
 -              ilk_hpd_detection_setup(dev_priv);
        }
  }
  
  static void gen8_irq_postinstall(struct drm_i915_private *dev_priv)
  {
        if (HAS_PCH_SPLIT(dev_priv))
 -              ibx_irq_pre_postinstall(dev_priv);
 +              ibx_irq_postinstall(dev_priv);
  
        gen8_gt_irq_postinstall(&dev_priv->gt);
        gen8_de_irq_postinstall(dev_priv);
  
 -      if (HAS_PCH_SPLIT(dev_priv))
 -              ibx_irq_postinstall(dev_priv);
 -
        gen8_master_intr_enable(dev_priv->uncore.regs);
  }
  
  static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
  {
 +      struct intel_uncore *uncore = &dev_priv->uncore;
        u32 mask = SDE_GMBUS_ICP;
  
 -      drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
 -      I915_WRITE(SDEIER, 0xffffffff);
 -      POSTING_READ(SDEIER);
 -
 -      gen3_assert_iir_is_zero(&dev_priv->uncore, SDEIIR);
 -      I915_WRITE(SDEIMR, ~mask);
 -
 -      if (HAS_PCH_TGP(dev_priv)) {
 -              icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
 -              icp_tc_hpd_detection_setup(dev_priv, TGP_TC_HPD_ENABLE_MASK);
 -      } else if (HAS_PCH_JSP(dev_priv)) {
 -              icp_ddi_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK);
 -      } else if (HAS_PCH_MCC(dev_priv)) {
 -              icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
 -              icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE(PORT_TC1));
 -      } else {
 -              icp_ddi_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK);
 -              icp_tc_hpd_detection_setup(dev_priv, ICP_TC_HPD_ENABLE_MASK);
 -      }
 +      GEN3_IRQ_INIT(uncore, SDE, ~mask, 0xffffffff);
  }
  
  static void gen11_irq_postinstall(struct drm_i915_private *dev_priv)
@@@ -3781,7 -3564,6 +3809,7 @@@ static void i8xx_irq_reset(struct drm_i
        i9xx_pipestat_irq_reset(dev_priv);
  
        GEN2_IRQ_RESET(uncore);
 +      dev_priv->irq_mask = ~0u;
  }
  
  static void i8xx_irq_postinstall(struct drm_i915_private *dev_priv)
@@@ -3934,6 -3716,8 +3962,8 @@@ static irqreturn_t i8xx_irq_handler(in
                i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);
  
+       pmu_irq_stats(dev_priv, ret);
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  
        return ret;
@@@ -3951,7 -3735,6 +3981,7 @@@ static void i915_irq_reset(struct drm_i
        i9xx_pipestat_irq_reset(dev_priv);
  
        GEN3_IRQ_RESET(uncore, GEN2_);
 +      dev_priv->irq_mask = ~0u;
  }
  
  static void i915_irq_postinstall(struct drm_i915_private *dev_priv)
@@@ -4043,6 -3826,8 +4073,8 @@@ static irqreturn_t i915_irq_handler(in
                i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);
  
+       pmu_irq_stats(dev_priv, ret);
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  
        return ret;
@@@ -4058,7 -3843,6 +4090,7 @@@ static void i965_irq_reset(struct drm_i
        i9xx_pipestat_irq_reset(dev_priv);
  
        GEN3_IRQ_RESET(uncore, GEN2_);
 +      dev_priv->irq_mask = ~0u;
  }
  
  static void i965_irq_postinstall(struct drm_i915_private *dev_priv)
@@@ -4189,6 -3973,8 +4221,8 @@@ static irqreturn_t i965_irq_handler(in
                i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);
  
+       pmu_irq_stats(dev_priv, IRQ_HANDLED);
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
  
        return ret;
@@@ -4206,6 -3992,10 +4240,6 @@@ void intel_irq_init(struct drm_i915_pri
        struct drm_device *dev = &dev_priv->drm;
        int i;
  
 -      intel_hpd_init_pins(dev_priv);
 -
 -      intel_hpd_init_work(dev_priv);
 -
        INIT_WORK(&dev_priv->l3_parity.error_work, ivb_parity_work);
        for (i = 0; i < MAX_L3_SLICES; ++i)
                dev_priv->l3_parity.remap_info[i] = NULL;
        if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11)
                dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16;
  
 +      if (!HAS_DISPLAY(dev_priv))
 +              return;
 +
 +      intel_hpd_init_pins(dev_priv);
 +
 +      intel_hpd_init_work(dev_priv);
 +
        dev->vblank_disable_immediate = true;
  
        /* Most platforms treat the display irq block as an always-on
                if (I915_HAS_HOTPLUG(dev_priv))
                        dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
        } else {
 -              if (HAS_PCH_JSP(dev_priv))
 -                      dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup;
 -              else if (HAS_PCH_MCC(dev_priv))
 -                      dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup;
 +              if (HAS_PCH_DG1(dev_priv))
 +                      dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
                else if (INTEL_GEN(dev_priv) >= 11)
                        dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
                else if (IS_GEN9_LP(dev_priv))
diff --combined drivers/gpu/drm/i915/i915_pmu.c
index cd786ad12be704f596f51f67ba84f61ab2ae50e8,3b88cb01b4da7ed5da2eae5249750e3f50dd0d14..d76685ce03998143a090a52e787f799bcfc19a5b
@@@ -4,7 -4,6 +4,6 @@@
  * Copyright © 2017-2018 Intel Corporation
   */
  
- #include <linux/irq.h>
  #include <linux/pm_runtime.h>
  
  #include "gt/intel_engine.h"
@@@ -30,7 -29,6 +29,7 @@@
  #define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS)
  
  static cpumask_t i915_pmu_cpumask;
 +static unsigned int i915_pmu_target_cpu = -1;
  
  static u8 engine_config_sample(u64 config)
  {
@@@ -424,30 -422,12 +423,14 @@@ static enum hrtimer_restart i915_sample
        return HRTIMER_RESTART;
  }
  
- static u64 count_interrupts(struct drm_i915_private *i915)
- {
-       /* open-coded kstat_irqs() */
-       struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
-       u64 sum = 0;
-       int cpu;
-       if (!desc || !desc->kstat_irqs)
-               return 0;
-       for_each_possible_cpu(cpu)
-               sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
-       return sum;
- }
  static void i915_pmu_event_destroy(struct perf_event *event)
  {
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
  
        drm_WARN_ON(&i915->drm, event->parent);
 +
 +      drm_dev_put(&i915->drm);
  }
  
  static int
@@@ -513,12 -493,8 +496,12 @@@ static int i915_pmu_event_init(struct p
  {
        struct drm_i915_private *i915 =
                container_of(event->pmu, typeof(*i915), pmu.base);
 +      struct i915_pmu *pmu = &i915->pmu;
        int ret;
  
 +      if (pmu->closed)
 +              return -ENODEV;
 +
        if (event->attr.type != event->pmu->type)
                return -ENOENT;
  
        if (ret)
                return ret;
  
 -      if (!event->parent)
 +      if (!event->parent) {
 +              drm_dev_get(&i915->drm);
                event->destroy = i915_pmu_event_destroy;
 +      }
  
        return 0;
  }
@@@ -590,7 -564,7 +573,7 @@@ static u64 __i915_pmu_event_read(struc
                                   USEC_PER_SEC /* to MHz */);
                        break;
                case I915_PMU_INTERRUPTS:
-                       val = count_interrupts(i915);
+                       val = READ_ONCE(pmu->irq_count);
                        break;
                case I915_PMU_RC6_RESIDENCY:
                        val = get_rc6(&i915->gt);
  
  static void i915_pmu_event_read(struct perf_event *event)
  {
 +      struct drm_i915_private *i915 =
 +              container_of(event->pmu, typeof(*i915), pmu.base);
        struct hw_perf_event *hwc = &event->hw;
 +      struct i915_pmu *pmu = &i915->pmu;
        u64 prev, new;
  
 +      if (pmu->closed) {
 +              event->hw.state = PERF_HES_STOPPED;
 +              return;
 +      }
  again:
        prev = local64_read(&hwc->prev_count);
        new = __i915_pmu_event_read(event);
@@@ -740,13 -707,6 +723,13 @@@ static void i915_pmu_disable(struct per
  
  static void i915_pmu_event_start(struct perf_event *event, int flags)
  {
 +      struct drm_i915_private *i915 =
 +              container_of(event->pmu, typeof(*i915), pmu.base);
 +      struct i915_pmu *pmu = &i915->pmu;
 +
 +      if (pmu->closed)
 +              return;
 +
        i915_pmu_enable(event);
        event->hw.state = 0;
  }
@@@ -761,13 -721,6 +744,13 @@@ static void i915_pmu_event_stop(struct 
  
  static int i915_pmu_event_add(struct perf_event *event, int flags)
  {
 +      struct drm_i915_private *i915 =
 +              container_of(event->pmu, typeof(*i915), pmu.base);
 +      struct i915_pmu *pmu = &i915->pmu;
 +
 +      if (pmu->closed)
 +              return -ENODEV;
 +
        if (flags & PERF_EF_START)
                i915_pmu_event_start(event, flags);
  
@@@ -1050,39 -1003,25 +1033,39 @@@ static int i915_pmu_cpu_online(unsigne
  static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
  {
        struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
 -      unsigned int target;
 +      unsigned int target = i915_pmu_target_cpu;
  
        GEM_BUG_ON(!pmu->base.event_init);
  
 +      /*
 +       * Unregistering an instance generates a CPU offline event which we must
 +       * ignore to avoid incorrectly modifying the shared i915_pmu_cpumask.
 +       */
 +      if (pmu->closed)
 +              return 0;
 +
        if (cpumask_test_and_clear_cpu(cpu, &i915_pmu_cpumask)) {
                target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu);
 +
                /* Migrate events if there is a valid target */
                if (target < nr_cpu_ids) {
                        cpumask_set_cpu(target, &i915_pmu_cpumask);
 -                      perf_pmu_migrate_context(&pmu->base, cpu, target);
 +                      i915_pmu_target_cpu = target;
                }
        }
  
 +      if (target < nr_cpu_ids && target != pmu->cpuhp.cpu) {
 +              perf_pmu_migrate_context(&pmu->base, cpu, target);
 +              pmu->cpuhp.cpu = target;
 +      }
 +
        return 0;
  }
  
 -static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
 +static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
 +
 +void i915_pmu_init(void)
  {
 -      enum cpuhp_state slot;
        int ret;
  
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      i915_pmu_cpu_online,
                                      i915_pmu_cpu_offline);
        if (ret < 0)
 -              return ret;
 +              pr_notice("Failed to setup cpuhp state for i915 PMU! (%d)\n",
 +                        ret);
 +      else
 +              cpuhp_slot = ret;
 +}
  
 -      slot = ret;
 -      ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
 -      if (ret) {
 -              cpuhp_remove_multi_state(slot);
 -              return ret;
 -      }
 +void i915_pmu_exit(void)
 +{
 +      if (cpuhp_slot != CPUHP_INVALID)
 +              cpuhp_remove_multi_state(cpuhp_slot);
 +}
  
 -      pmu->cpuhp.slot = slot;
 -      return 0;
 +static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
 +{
 +      if (cpuhp_slot == CPUHP_INVALID)
 +              return -EINVAL;
 +
 +      return cpuhp_state_add_instance(cpuhp_slot, &pmu->cpuhp.node);
  }
  
  static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
  {
 -      struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
 -
 -      drm_WARN_ON(&i915->drm, pmu->cpuhp.slot == CPUHP_INVALID);
 -      drm_WARN_ON(&i915->drm, cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
 -      cpuhp_remove_multi_state(pmu->cpuhp.slot);
 -      pmu->cpuhp.slot = CPUHP_INVALID;
 +      cpuhp_state_remove_instance(cpuhp_slot, &pmu->cpuhp.node);
  }
  
  static bool is_igp(struct drm_i915_private *i915)
@@@ -1146,7 -1083,7 +1129,7 @@@ void i915_pmu_register(struct drm_i915_
        spin_lock_init(&pmu->lock);
        hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        pmu->timer.function = i915_sample;
 -      pmu->cpuhp.slot = CPUHP_INVALID;
 +      pmu->cpuhp.cpu = -1;
  
        if (!is_igp(i915)) {
                pmu->name = kasprintf(GFP_KERNEL,
@@@ -1213,13 -1150,7 +1196,13 @@@ void i915_pmu_unregister(struct drm_i91
        if (!pmu->base.event_init)
                return;
  
 -      drm_WARN_ON(&i915->drm, pmu->enable);
 +      /*
 +       * "Disconnect" the PMU callbacks - since all are atomic,
 +       * synchronize_rcu() ensures all currently executing ones will have
 +       * exited before we proceed with unregistration.
 +       */
 +      pmu->closed = true;
 +      synchronize_rcu();
  
        hrtimer_cancel(&pmu->timer);
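
The pmu->closed checks added throughout the event callbacks pair with this synchronize_rcu() to form a small teardown protocol. A generic sketch of the pattern (not i915 code):

    struct dev_pmu {
            bool closed;    /* set once at unregister, read in callbacks */
    };

    /* Callbacks are invoked under RCU protection by the perf core. */
    static int dev_pmu_callback(struct dev_pmu *pmu)
    {
            if (READ_ONCE(pmu->closed))
                    return -ENODEV;
            /* ... touch device state ... */
            return 0;
    }

    static void dev_pmu_unregister(struct dev_pmu *pmu)
    {
            WRITE_ONCE(pmu->closed, true);
            synchronize_rcu();      /* in-flight callbacks have exited */
            /* ... now safe to tear down device state ... */
    }
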
  
index a24885ab415c288267a45562c7fa11be4ba6bacd,9e49c649078069d56ea8176419900ca73276beab..8405d6da5b9a65ef0a9a78a7044b9bb098c18b93
@@@ -43,16 -43,12 +43,16 @@@ struct i915_pmu 
         */
        struct {
                struct hlist_node node;
 -              enum cpuhp_state slot;
 +              unsigned int cpu;
        } cpuhp;
        /**
         * @base: PMU base.
         */
        struct pmu base;
 +      /**
 +       * @closed: i915 is unregistering.
 +       */
 +      bool closed;
        /**
         * @name: Name as registered with perf core.
         */
         * @sleep_last: Last time GT parked for RC6 estimation.
         */
        ktime_t sleep_last;
+       /**
+        * @irq_count: Number of interrupts
+        *
+        * Intentionally unsigned long to avoid atomics or heuristics on 32bit.
+        * 4e9 interrupts are a lot and postprocessing can easily deal with
+        * an occasional wraparound. It's 32bit after all.
+        */
+       unsigned long irq_count;
        /**
         * @events_attr_group: Device events attribute group.
         */
  };
  
  #ifdef CONFIG_PERF_EVENTS
 +void i915_pmu_init(void);
 +void i915_pmu_exit(void);
  void i915_pmu_register(struct drm_i915_private *i915);
  void i915_pmu_unregister(struct drm_i915_private *i915);
  void i915_pmu_gt_parked(struct drm_i915_private *i915);
  void i915_pmu_gt_unparked(struct drm_i915_private *i915);
  #else
 +static inline void i915_pmu_init(void) {}
 +static inline void i915_pmu_exit(void) {}
  static inline void i915_pmu_register(struct drm_i915_private *i915) {}
  static inline void i915_pmu_unregister(struct drm_i915_private *i915) {}
  static inline void i915_pmu_gt_parked(struct drm_i915_private *i915) {}
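
The wraparound tolerance described in the @irq_count comment falls out of unsigned arithmetic on the consumer side; a hypothetical postprocessing helper:

    #include <stdint.h>

    /* Hypothetical userspace postprocessing: the delta of a free-running
     * unsigned counter is correct across a single wraparound. */
    static uint32_t irq_delta(uint32_t prev, uint32_t now)
    {
            return now - prev;  /* prev=0xfffffff0, now=0x10 -> 0x20 */
    }
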
index 7954c1daf2b64315d66a8e18e96eb1a0014a9396,399459adee7dc908fc387e255b921551506d0d17..c1c9118a66c9326982d33bcf2245feed73a3e0c7
@@@ -283,7 -283,7 +283,7 @@@ int mlx4_en_create_rx_ring(struct mlx4_
        ring->log_stride = ffs(ring->stride) - 1;
        ring->buf_size = ring->size * ring->stride + TXBB_SIZE;
  
 -      if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
 +      if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index, 0) < 0)
                goto err_ring;
  
        tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
@@@ -684,7 -684,7 +684,7 @@@ int mlx4_en_process_rx_cq(struct net_de
        xdp_prog = rcu_dereference(ring->xdp_prog);
        xdp.rxq = &ring->xdp_rxq;
        xdp.frame_sz = priv->frag_info[0].frag_stride;
 -      doorbell_pending = 0;
 +      doorbell_pending = false;
  
        /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
         * descriptor offset can be deduced from the CQE index instead of
@@@ -914,6 -914,7 +914,6 @@@ next
                wmb(); /* ensure HW sees CQ consumer before we post new buffers */
                ring->cons = cq->mcq.cons_index;
        }
 -      AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
  
        mlx4_en_refill_rx_buffers(priv, ring);
  
@@@ -958,18 -959,16 +958,14 @@@ int mlx4_en_poll_rx_cq(struct napi_stru
  
        /* If we used up all the quota - we're probably not done yet... */
        if (done == budget || !clean_complete) {
-               const struct cpumask *aff;
-               struct irq_data *idata;
                int cpu_curr;
  
                /* in case we got here because of !clean_complete */
                done = budget;
  
 -              INC_PERF_COUNTER(priv->pstats.napi_quota);
 -
                cpu_curr = smp_processor_id();
-               idata = irq_desc_get_irq_data(cq->irq_desc);
-               aff = irq_data_get_affinity_mask(idata);
  
-               if (likely(cpumask_test_cpu(cpu_curr, aff)))
+               if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask)))
                        return budget;
  
                /* Current cpu is not according to smp_irq_affinity -
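
The poll-path change above trades two pointer chases through irq_desc for a cpumask cached when the queue is brought up. A sketch of the setup side, hedged since the activation path is outside this hunk; mlx4_eq_get_irq() is the existing mlx4 helper that maps a completion vector to its Linux IRQ:

    /* Sketch (CQ activation path, assumed): cache the effective
     * affinity mask once instead of dereferencing irq_desc per poll. */
    int irq = mlx4_eq_get_irq(priv->mdev->dev, cq->vector);

    cq->aff_mask = irq_get_effective_affinity_mask(irq);
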
index 17f2b19193789bdb8562258bdbccc8efd3220519,48d71e0c71e4a1f7dd6088f153f7d9fa7655ad7b..e8ed23190de01b1b4c7e961387bb53a22d783c23
@@@ -36,7 -36,6 +36,7 @@@
  
  #include <linux/bitops.h>
  #include <linux/compiler.h>
 +#include <linux/ethtool.h>
  #include <linux/list.h>
  #include <linux/mutex.h>
  #include <linux/netdevice.h>
@@@ -47,6 -46,7 +47,7 @@@
  #endif
  #include <linux/cpu_rmap.h>
  #include <linux/ptp_clock_kernel.h>
+ #include <linux/irq.h>
  #include <net/xdp.h>
  
  #include <linux/mlx4/device.h>
  #define MLX4_EN_LOOPBACK_RETRIES      5
  #define MLX4_EN_LOOPBACK_TIMEOUT      100
  
 -#ifdef MLX4_EN_PERF_STAT
 -/* Number of samples to 'average' */
 -#define AVG_SIZE                      128
 -#define AVG_FACTOR                    1024
 -
 -#define INC_PERF_COUNTER(cnt)         (++(cnt))
 -#define ADD_PERF_COUNTER(cnt, add)    ((cnt) += (add))
 -#define AVG_PERF_COUNTER(cnt, sample) \
 -      ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
 -#define GET_PERF_COUNTER(cnt)         (cnt)
 -#define GET_AVG_PERF_COUNTER(cnt)     ((cnt) / AVG_FACTOR)
 -
 -#else
 -
 -#define INC_PERF_COUNTER(cnt)         do {} while (0)
 -#define ADD_PERF_COUNTER(cnt, add)    do {} while (0)
 -#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
 -#define GET_PERF_COUNTER(cnt)         (0)
 -#define GET_AVG_PERF_COUNTER(cnt)     (0)
 -#endif /* MLX4_EN_PERF_STAT */
 -
  /* Constants for TX flow */
  enum {
        MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
@@@ -251,10 -272,6 +252,10 @@@ struct mlx4_en_page_cache 
        } buf[MLX4_EN_CACHE_SIZE];
  };
  
 +enum {
 +      MLX4_EN_TX_RING_STATE_RECOVERING,
 +};
 +
  struct mlx4_en_priv;
  
  struct mlx4_en_tx_ring {
         * Only queue_stopped might be used if BQL is not properly working.
         */
        unsigned long           queue_stopped;
 +      unsigned long           state;
        struct mlx4_hwq_resources sp_wqres;
        struct mlx4_qp          sp_qp;
        struct mlx4_qp_context  sp_context;
@@@ -365,7 -381,7 +366,7 @@@ struct mlx4_en_cq 
        struct mlx4_cqe *buf;
  #define MLX4_EN_OPCODE_ERROR  0x1e
  
-       struct irq_desc *irq_desc;
+       const struct cpumask *aff_mask;
  };
  
  struct mlx4_en_port_profile {
@@@ -515,10 -531,6 +516,10 @@@ struct mlx4_en_stats_bitmap 
        struct mutex mutex; /* for mutual access to stats bitmap */
  };
  
 +enum {
 +      MLX4_EN_STATE_FLAG_RESTARTING,
 +};
 +
  struct mlx4_en_priv {
        struct mlx4_en_dev *mdev;
        struct mlx4_en_port_profile *prof;
        struct mlx4_en_cq *rx_cq[MAX_RX_RINGS];
        struct mlx4_qp drop_qp;
        struct work_struct rx_mode_task;
 -      struct work_struct watchdog_task;
 +      struct work_struct restart_task;
        struct work_struct linkstate_task;
        struct delayed_work stats_task;
        struct delayed_work service_task;
 -      struct mlx4_en_perf_stats pstats;
        struct mlx4_en_pkt_stats pkstats;
        struct mlx4_en_counter_stats pf_stats;
        struct mlx4_en_flow_stats_rx rx_priority_flowstats[MLX4_NUM_PRIORITIES];
        u32 pflags;
        u8 rss_key[MLX4_EN_RSS_KEY_SIZE];
        u8 rss_hash_fn;
 +      unsigned long state;
  };
  
  enum mlx4_en_wol {
index a1a81cfeb607db56ee342a407380acb9579ca4a4,45fd585d101b528f8c634727c4b1221f082858ec..055baf3b6cb1050779684e546a0a9b2721adfc4e
@@@ -227,7 -227,6 +227,7 @@@ enum mlx5e_priv_flag 
        MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
        MLX5E_PFLAG_XDP_TX_MPWQE,
        MLX5E_PFLAG_SKB_TX_MPWQE,
 +      MLX5E_PFLAG_TX_PORT_TS,
        MLX5E_NUM_PFLAGS, /* Keep last */
  };
  
@@@ -283,12 -282,10 +283,12 @@@ struct mlx5e_cq 
        u16                        event_ctr;
        struct napi_struct        *napi;
        struct mlx5_core_cq        mcq;
 -      struct mlx5e_channel      *channel;
 +      struct mlx5e_ch_stats     *ch_stats;
  
        /* control */
 +      struct net_device         *netdev;
        struct mlx5_core_dev      *mdev;
 +      struct mlx5e_priv         *priv;
        struct mlx5_wq_ctrl        wq_ctrl;
  } ____cacheline_aligned_in_smp;
  
@@@ -332,15 -329,6 +332,15 @@@ struct mlx5e_tx_mpwqe 
        u8 inline_on;
  };
  
 +struct mlx5e_skb_fifo {
 +      struct sk_buff **fifo;
 +      u16 *pc;
 +      u16 *cc;
 +      u16 mask;
 +};
 +
 +struct mlx5e_ptpsq;
 +
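
The skb FIFO above deliberately keeps its producer/consumer counters external (u16 *pc, *cc) so each owner decides where they live; push and pop then reduce to masked ring indexing. A minimal sketch of the accessors, assuming the real helpers sit alongside the other datapath inlines:

    static inline void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo,
                                           struct sk_buff *skb)
    {
            fifo->fifo[(*fifo->pc)++ & fifo->mask] = skb;
    }

    static inline struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
    {
            return fifo->fifo[(*fifo->cc)++ & fifo->mask];
    }
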
  struct mlx5e_txqsq {
        /* data path */
  
        /* read only */
        struct mlx5_wq_cyc         wq;
        u32                        dma_fifo_mask;
 -      u16                        skb_fifo_mask;
        struct mlx5e_sq_stats     *stats;
        struct {
                struct mlx5e_sq_dma       *dma_fifo;
 -              struct sk_buff           **skb_fifo;
 +              struct mlx5e_skb_fifo      skb_fifo;
                struct mlx5e_tx_wqe_info  *wqe_info;
        } db;
        void __iomem              *uar_map;
        unsigned int               hw_mtu;
        struct hwtstamp_config    *tstamp;
        struct mlx5_clock         *clock;
 +      struct net_device         *netdev;
 +      struct mlx5_core_dev      *mdev;
 +      struct mlx5e_priv         *priv;
  
        /* control path */
        struct mlx5_wq_ctrl        wq_ctrl;
 -      struct mlx5e_channel      *channel;
        int                        ch_ix;
        int                        txq_ix;
        u32                        rate_limit;
        struct work_struct         recover_work;
 +      struct mlx5e_ptpsq        *ptpsq;
  } ____cacheline_aligned_in_smp;
  
  struct mlx5e_dma_info {
@@@ -607,6 -593,7 +607,6 @@@ struct mlx5e_rq 
                u8             map_dir;   /* dma map direction */
        } buff;
  
 -      struct mlx5e_channel  *channel;
        struct device         *pdev;
        struct net_device     *netdev;
        struct mlx5e_rq_stats *stats;
        struct mlx5e_page_cache page_cache;
        struct hwtstamp_config *tstamp;
        struct mlx5_clock      *clock;
 +      struct mlx5e_icosq    *icosq;
 +      struct mlx5e_priv     *priv;
  
        mlx5e_fp_handle_rx_cqe handle_rx_cqe;
        mlx5e_fp_post_rx_wqes  post_wqes;
@@@ -684,7 -669,7 +684,7 @@@ struct mlx5e_channel 
        spinlock_t                 async_icosq_lock;
  
        /* data path - accessed per napi poll */
-       struct irq_desc *irq_desc;
+       const struct cpumask      *aff_mask;
        struct mlx5e_ch_stats     *stats;
  
        /* control */
        int                        cpu;
  };
  
 +struct mlx5e_port_ptp;
 +
  struct mlx5e_channels {
        struct mlx5e_channel **c;
 +      struct mlx5e_port_ptp  *port_ptp;
        unsigned int           num;
        struct mlx5e_params    params;
  };
@@@ -715,12 -697,6 +715,12 @@@ struct mlx5e_channel_stats 
        struct mlx5e_xdpsq_stats xsksq;
  } ____cacheline_aligned_in_smp;
  
 +struct mlx5e_port_ptp_stats {
 +      struct mlx5e_ch_stats ch;
 +      struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC];
 +      struct mlx5e_ptp_cq_stats cq[MLX5E_MAX_NUM_TC];
 +} ____cacheline_aligned_in_smp;
 +
  enum {
        MLX5E_STATE_OPENED,
        MLX5E_STATE_DESTROYING,
@@@ -790,10 -766,8 +790,10 @@@ struct mlx5e_scratchpad 
  
  struct mlx5e_priv {
        /* priv data path fields - start */
 -      struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
 +      /* +1 for port ptp ts */
 +      struct mlx5e_txqsq *txq2sq[(MLX5E_MAX_NUM_CHANNELS + 1) * MLX5E_MAX_NUM_TC];
        int channel_tc2realtxq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
 +      int port_ptp_tc2realtxq[MLX5E_MAX_NUM_TC];
  #ifdef CONFIG_MLX5_CORE_EN_DCB
        struct mlx5e_dcbx_dp       dcbx_dp;
  #endif
        struct net_device         *netdev;
        struct mlx5e_stats         stats;
        struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
 +      struct mlx5e_port_ptp_stats port_ptp_stats;
        u16                        max_nch;
        u8                         max_opened_tc;
 +      bool                       port_ptp_opened;
        struct hwtstamp_config     tstamp;
        u16                        q_counter;
        u16                        drop_rq_q_counter;
        struct notifier_block      events_nb;
 +      int                        num_tc_x_num_ch;
  
        struct udp_tunnel_nic_info nic_info;
  #ifdef CONFIG_MLX5_CORE_EN_DCB
@@@ -952,17 -923,9 +952,17 @@@ int mlx5e_open_xdpsq(struct mlx5e_chann
                     struct mlx5e_xdpsq *sq, bool is_redirect);
  void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq);
  
 +struct mlx5e_create_cq_param {
 +      struct napi_struct *napi;
 +      struct mlx5e_ch_stats *ch_stats;
 +      int node;
 +      int ix;
 +};
 +
  struct mlx5e_cq_param;
 -int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
 -                struct mlx5e_cq_param *param, struct mlx5e_cq *cq);
 +int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
 +                struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
 +                struct mlx5e_cq *cq);
  void mlx5e_close_cq(struct mlx5e_cq *cq);
  
  int mlx5e_open_locked(struct net_device *netdev);
@@@ -1011,17 -974,7 +1011,17 @@@ void mlx5e_deactivate_icosq(struct mlx5
  int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
                    struct mlx5e_modify_sq_param *p);
  void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
 +void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
 +void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
  void mlx5e_tx_disable_queue(struct netdev_queue *txq);
 +int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa);
 +void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq);
 +struct mlx5e_create_sq_param;
 +int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
 +                      struct mlx5e_sq_param *param,
 +                      struct mlx5e_create_sq_param *csp,
 +                      u32 *sqn);
 +void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
  
  static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
  {
index 351118985a577bffe42ece4861fb44e3036a2336,0000000000000000000000000000000000000000..2a2bac30daaa712ecccec93be7aad2ee5ec4306f
mode 100644,000000..100644
--- /dev/null
@@@ -1,529 -1,0 +1,528 @@@
-       c->irq_desc = irq_to_desc(irq);
 +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 +// Copyright (c) 2020 Mellanox Technologies
 +
 +#include "en/ptp.h"
 +#include "en/txrx.h"
 +#include "lib/clock.h"
 +
 +struct mlx5e_skb_cb_hwtstamp {
 +      ktime_t cqe_hwtstamp;
 +      ktime_t port_hwtstamp;
 +};
 +
 +void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb)
 +{
 +      memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
 +}
 +
 +static struct mlx5e_skb_cb_hwtstamp *mlx5e_skb_cb_get_hwts(struct sk_buff *skb)
 +{
 +      BUILD_BUG_ON(sizeof(struct mlx5e_skb_cb_hwtstamp) > sizeof(skb->cb));
 +      return (struct mlx5e_skb_cb_hwtstamp *)skb->cb;
 +}
 +
 +static void mlx5e_skb_cb_hwtstamp_tx(struct sk_buff *skb,
 +                                   struct mlx5e_ptp_cq_stats *cq_stats)
 +{
 +      struct skb_shared_hwtstamps hwts = {};
 +      ktime_t diff;
 +
 +      diff = abs(mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp -
 +                 mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp);
 +
 +      /* Maximal allowed diff is 1 / 128 second */
 +      if (diff > (NSEC_PER_SEC >> 7)) {
 +              cq_stats->abort++;
 +              cq_stats->abort_abs_diff_ns += diff;
 +              return;
 +      }
 +
 +      hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp;
 +      skb_tstamp_tx(skb, &hwts);
 +}
 +
 +void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
 +                                 ktime_t hwtstamp,
 +                                 struct mlx5e_ptp_cq_stats *cq_stats)
 +{
 +      switch (hwtstamp_type) {
 +      case (MLX5E_SKB_CB_CQE_HWTSTAMP):
 +              mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp = hwtstamp;
 +              break;
 +      case (MLX5E_SKB_CB_PORT_HWTSTAMP):
 +              mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp = hwtstamp;
 +              break;
 +      }
 +
 +      /* Once both CQEs have arrived, check and report the port tstamp, and
 +       * clear the skb cb, as the skb is about to be released.
 +       */
 +      if (!mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp ||
 +          !mlx5e_skb_cb_get_hwts(skb)->port_hwtstamp)
 +              return;
 +
 +      mlx5e_skb_cb_hwtstamp_tx(skb, cq_stats);
 +      memset(skb->cb, 0, sizeof(struct mlx5e_skb_cb_hwtstamp));
 +}
 +
 +static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
 +                                  struct mlx5_cqe64 *cqe,
 +                                  int budget)
 +{
 +      struct sk_buff *skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
 +      ktime_t hwtstamp;
 +
 +      if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
 +              ptpsq->cq_stats->err_cqe++;
 +              goto out;
 +      }
 +
 +      hwtstamp = mlx5_timecounter_cyc2time(ptpsq->txqsq.clock, get_cqe_ts(cqe));
 +      mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
 +                                    hwtstamp, ptpsq->cq_stats);
 +      ptpsq->cq_stats->cqe++;
 +
 +out:
 +      napi_consume_skb(skb, budget);
 +}
 +
 +static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
 +{
 +      struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
 +      struct mlx5_cqwq *cqwq = &cq->wq;
 +      struct mlx5_cqe64 *cqe;
 +      int work_done = 0;
 +
 +      if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
 +              return false;
 +
 +      cqe = mlx5_cqwq_get_cqe(cqwq);
 +      if (!cqe)
 +              return false;
 +
 +      do {
 +              mlx5_cqwq_pop(cqwq);
 +
 +              mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
 +      } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
 +
 +      mlx5_cqwq_update_db_record(cqwq);
 +
 +      /* ensure cq space is freed before enabling more cqes */
 +      wmb();
 +
 +      return work_done == budget;
 +}
 +
 +static int mlx5e_ptp_napi_poll(struct napi_struct *napi, int budget)
 +{
 +      struct mlx5e_port_ptp *c = container_of(napi, struct mlx5e_port_ptp,
 +                                              napi);
 +      struct mlx5e_ch_stats *ch_stats = c->stats;
 +      bool busy = false;
 +      int work_done = 0;
 +      int i;
 +
 +      rcu_read_lock();
 +
 +      ch_stats->poll++;
 +
 +      for (i = 0; i < c->num_tc; i++) {
 +              busy |= mlx5e_poll_tx_cq(&c->ptpsq[i].txqsq.cq, budget);
 +              busy |= mlx5e_ptp_poll_ts_cq(&c->ptpsq[i].ts_cq, budget);
 +      }
 +
 +      if (busy) {
 +              work_done = budget;
 +              goto out;
 +      }
 +
 +      if (unlikely(!napi_complete_done(napi, work_done)))
 +              goto out;
 +
 +      ch_stats->arm++;
 +
 +      for (i = 0; i < c->num_tc; i++) {
 +              mlx5e_cq_arm(&c->ptpsq[i].txqsq.cq);
 +              mlx5e_cq_arm(&c->ptpsq[i].ts_cq);
 +      }
 +
 +out:
 +      rcu_read_unlock();
 +
 +      return work_done;
 +}
 +
 +static int mlx5e_ptp_alloc_txqsq(struct mlx5e_port_ptp *c, int txq_ix,
 +                               struct mlx5e_params *params,
 +                               struct mlx5e_sq_param *param,
 +                               struct mlx5e_txqsq *sq, int tc,
 +                               struct mlx5e_ptpsq *ptpsq)
 +{
 +      void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
 +      struct mlx5_core_dev *mdev = c->mdev;
 +      struct mlx5_wq_cyc *wq = &sq->wq;
 +      int err;
 +      int node;
 +
 +      sq->pdev      = c->pdev;
 +      sq->tstamp    = c->tstamp;
 +      sq->clock     = &mdev->clock;
 +      sq->mkey_be   = c->mkey_be;
 +      sq->netdev    = c->netdev;
 +      sq->priv      = c->priv;
 +      sq->mdev      = mdev;
 +      sq->ch_ix     = c->ix;
 +      sq->txq_ix    = txq_ix;
 +      sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 +      sq->min_inline_mode = params->tx_min_inline_mode;
 +      sq->hw_mtu    = MLX5E_SW2HW_MTU(params, params->sw_mtu);
 +      sq->stats     = &c->priv->port_ptp_stats.sq[tc];
 +      sq->ptpsq     = ptpsq;
 +      INIT_WORK(&sq->recover_work, mlx5e_tx_err_cqe_work);
 +      if (!MLX5_CAP_ETH(mdev, wqe_vlan_insert))
 +              set_bit(MLX5E_SQ_STATE_VLAN_NEED_L2_INLINE, &sq->state);
 +      sq->stop_room = param->stop_room;
 +
 +      node = dev_to_node(mlx5_core_dma_dev(mdev));
 +
 +      param->wq.db_numa_node = node;
 +      err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
 +      if (err)
 +              return err;
 +      wq->db    = &wq->db[MLX5_SND_DBR];
 +
 +      err = mlx5e_alloc_txqsq_db(sq, node);
 +      if (err)
 +              goto err_sq_wq_destroy;
 +
 +      return 0;
 +
 +err_sq_wq_destroy:
 +      mlx5_wq_destroy(&sq->wq_ctrl);
 +
 +      return err;
 +}
 +
 +static void mlx5e_ptp_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
 +{
 +      mlx5_core_destroy_sq(mdev, sqn);
 +}
 +
 +static int mlx5e_ptp_alloc_traffic_db(struct mlx5e_ptpsq *ptpsq, int numa)
 +{
 +      int wq_sz = mlx5_wq_cyc_get_size(&ptpsq->txqsq.wq);
 +
 +      ptpsq->skb_fifo.fifo = kvzalloc_node(array_size(wq_sz, sizeof(*ptpsq->skb_fifo.fifo)),
 +                                           GFP_KERNEL, numa);
 +      if (!ptpsq->skb_fifo.fifo)
 +              return -ENOMEM;
 +
 +      ptpsq->skb_fifo.pc   = &ptpsq->skb_fifo_pc;
 +      ptpsq->skb_fifo.cc   = &ptpsq->skb_fifo_cc;
 +      ptpsq->skb_fifo.mask = wq_sz - 1;
 +
 +      return 0;
 +}
 +
 +static void mlx5e_ptp_drain_skb_fifo(struct mlx5e_skb_fifo *skb_fifo)
 +{
 +      while (*skb_fifo->pc != *skb_fifo->cc) {
 +              struct sk_buff *skb = mlx5e_skb_fifo_pop(skb_fifo);
 +
 +              dev_kfree_skb_any(skb);
 +      }
 +}
 +
 +static void mlx5e_ptp_free_traffic_db(struct mlx5e_skb_fifo *skb_fifo)
 +{
 +      mlx5e_ptp_drain_skb_fifo(skb_fifo);
 +      kvfree(skb_fifo->fifo);
 +}
 +
 +static int mlx5e_ptp_open_txqsq(struct mlx5e_port_ptp *c, u32 tisn,
 +                              int txq_ix, struct mlx5e_ptp_params *cparams,
 +                              int tc, struct mlx5e_ptpsq *ptpsq)
 +{
 +      struct mlx5e_sq_param *sqp = &cparams->txq_sq_param;
 +      struct mlx5e_txqsq *txqsq = &ptpsq->txqsq;
 +      struct mlx5e_create_sq_param csp = {};
 +      int err;
 +
 +      err = mlx5e_ptp_alloc_txqsq(c, txq_ix, &cparams->params, sqp,
 +                                  txqsq, tc, ptpsq);
 +      if (err)
 +              return err;
 +
 +      csp.tisn            = tisn;
 +      csp.tis_lst_sz      = 1;
 +      csp.cqn             = txqsq->cq.mcq.cqn;
 +      csp.wq_ctrl         = &txqsq->wq_ctrl;
 +      csp.min_inline_mode = txqsq->min_inline_mode;
 +      csp.ts_cqe_to_dest_cqn = ptpsq->ts_cq.mcq.cqn;
 +
 +      err = mlx5e_create_sq_rdy(c->mdev, sqp, &csp, &txqsq->sqn);
 +      if (err)
 +              goto err_free_txqsq;
 +
 +      err = mlx5e_ptp_alloc_traffic_db(ptpsq,
 +                                       dev_to_node(mlx5_core_dma_dev(c->mdev)));
 +      if (err)
 +              goto err_free_txqsq;
 +
 +      return 0;
 +
 +err_free_txqsq:
 +      mlx5e_free_txqsq(txqsq);
 +
 +      return err;
 +}
 +
 +static void mlx5e_ptp_close_txqsq(struct mlx5e_ptpsq *ptpsq)
 +{
 +      struct mlx5e_txqsq *sq = &ptpsq->txqsq;
 +      struct mlx5_core_dev *mdev = sq->mdev;
 +
 +      mlx5e_ptp_free_traffic_db(&ptpsq->skb_fifo);
 +      cancel_work_sync(&sq->recover_work);
 +      mlx5e_ptp_destroy_sq(mdev, sq->sqn);
 +      mlx5e_free_txqsq_descs(sq);
 +      mlx5e_free_txqsq(sq);
 +}
 +
 +static int mlx5e_ptp_open_txqsqs(struct mlx5e_port_ptp *c,
 +                               struct mlx5e_ptp_params *cparams)
 +{
 +      struct mlx5e_params *params = &cparams->params;
 +      int ix_base;
 +      int err;
 +      int tc;
 +
 +      ix_base = params->num_tc * params->num_channels;
 +
 +      for (tc = 0; tc < params->num_tc; tc++) {
 +              int txq_ix = ix_base + tc;
 +
 +              err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
 +                                         cparams, tc, &c->ptpsq[tc]);
 +              if (err)
 +                      goto close_txqsq;
 +      }
 +
 +      return 0;
 +
 +close_txqsq:
 +      for (--tc; tc >= 0; tc--)
 +              mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
 +
 +      return err;
 +}
 +
 +static void mlx5e_ptp_close_txqsqs(struct mlx5e_port_ptp *c)
 +{
 +      int tc;
 +
 +      for (tc = 0; tc < c->num_tc; tc++)
 +              mlx5e_ptp_close_txqsq(&c->ptpsq[tc]);
 +}
 +
 +static int mlx5e_ptp_open_cqs(struct mlx5e_port_ptp *c,
 +                            struct mlx5e_ptp_params *cparams)
 +{
 +      struct mlx5e_params *params = &cparams->params;
 +      struct mlx5e_create_cq_param ccp = {};
 +      struct dim_cq_moder ptp_moder = {};
 +      struct mlx5e_cq_param *cq_param;
 +      int err;
 +      int tc;
 +
 +      ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
 +      ccp.ch_stats = c->stats;
 +      ccp.napi     = &c->napi;
 +      ccp.ix       = c->ix;
 +
 +      cq_param = &cparams->txq_sq_param.cqp;
 +
 +      for (tc = 0; tc < params->num_tc; tc++) {
 +              struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq;
 +
 +              err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
 +              if (err)
 +                      goto out_err_txqsq_cq;
 +      }
 +
 +      for (tc = 0; tc < params->num_tc; tc++) {
 +              struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq;
 +              struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc];
 +
 +              err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
 +              if (err)
 +                      goto out_err_ts_cq;
 +
 +              ptpsq->cq_stats = &c->priv->port_ptp_stats.cq[tc];
 +      }
 +
 +      return 0;
 +
 +out_err_ts_cq:
 +      for (--tc; tc >= 0; tc--)
 +              mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
 +      tc = params->num_tc;
 +out_err_txqsq_cq:
 +      for (--tc; tc >= 0; tc--)
 +              mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
 +
 +      return err;
 +}
 +
 +static void mlx5e_ptp_close_cqs(struct mlx5e_port_ptp *c)
 +{
 +      int tc;
 +
 +      for (tc = 0; tc < c->num_tc; tc++)
 +              mlx5e_close_cq(&c->ptpsq[tc].ts_cq);
 +
 +      for (tc = 0; tc < c->num_tc; tc++)
 +              mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq);
 +}
 +
 +static void mlx5e_ptp_build_sq_param(struct mlx5e_priv *priv,
 +                                   struct mlx5e_params *params,
 +                                   struct mlx5e_sq_param *param)
 +{
 +      void *sqc = param->sqc;
 +      void *wq;
 +
 +      mlx5e_build_sq_param_common(priv, param);
 +
 +      wq = MLX5_ADDR_OF(sqc, sqc, wq);
 +      MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
 +      param->stop_room = mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
 +      mlx5e_build_tx_cq_param(priv, params, &param->cqp);
 +}
 +
 +static void mlx5e_ptp_build_params(struct mlx5e_port_ptp *c,
 +                                 struct mlx5e_ptp_params *cparams,
 +                                 struct mlx5e_params *orig)
 +{
 +      struct mlx5e_params *params = &cparams->params;
 +
 +      params->tx_min_inline_mode = orig->tx_min_inline_mode;
 +      params->num_channels = orig->num_channels;
 +      params->hard_mtu = orig->hard_mtu;
 +      params->sw_mtu = orig->sw_mtu;
 +      params->num_tc = orig->num_tc;
 +
 +      /* SQ */
 +      params->log_sq_size = orig->log_sq_size;
 +
 +      mlx5e_ptp_build_sq_param(c->priv, params, &cparams->txq_sq_param);
 +}
 +
 +static int mlx5e_ptp_open_queues(struct mlx5e_port_ptp *c,
 +                               struct mlx5e_ptp_params *cparams)
 +{
 +      int err;
 +
 +      err = mlx5e_ptp_open_cqs(c, cparams);
 +      if (err)
 +              return err;
 +
 +      napi_enable(&c->napi);
 +
 +      err = mlx5e_ptp_open_txqsqs(c, cparams);
 +      if (err)
 +              goto disable_napi;
 +
 +      return 0;
 +
 +disable_napi:
 +      napi_disable(&c->napi);
 +      mlx5e_ptp_close_cqs(c);
 +
 +      return err;
 +}
 +
 +static void mlx5e_ptp_close_queues(struct mlx5e_port_ptp *c)
 +{
 +      mlx5e_ptp_close_txqsqs(c);
 +      napi_disable(&c->napi);
 +      mlx5e_ptp_close_cqs(c);
 +}
 +
 +int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
 +                      u8 lag_port, struct mlx5e_port_ptp **cp)
 +{
 +      struct net_device *netdev = priv->netdev;
 +      struct mlx5_core_dev *mdev = priv->mdev;
 +      struct mlx5e_ptp_params *cparams;
 +      struct mlx5e_port_ptp *c;
 +      unsigned int irq;
 +      int err;
 +      int eqn;
 +
 +      err = mlx5_vector2eqn(priv->mdev, 0, &eqn, &irq);
 +      if (err)
 +              return err;
 +
 +      c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
 +      cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
 +      if (!c || !cparams)
 +              return -ENOMEM;
 +
 +      c->priv     = priv;
 +      c->mdev     = priv->mdev;
 +      c->tstamp   = &priv->tstamp;
 +      c->ix       = 0;
 +      c->pdev     = mlx5_core_dma_dev(priv->mdev);
 +      c->netdev   = priv->netdev;
 +      c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
 +      c->num_tc   = params->num_tc;
 +      c->stats    = &priv->port_ptp_stats.ch;
 +      c->lag_port = lag_port;
 +
 +      netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
 +
 +      mlx5e_ptp_build_params(c, cparams, params);
 +
 +      err = mlx5e_ptp_open_queues(c, cparams);
 +      if (unlikely(err))
 +              goto err_napi_del;
 +
 +      *cp = c;
 +
 +      kvfree(cparams);
 +
 +      return 0;
 +
 +err_napi_del:
 +      netif_napi_del(&c->napi);
 +
 +      kvfree(cparams);
 +      kvfree(c);
 +      return err;
 +}
 +
 +void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c)
 +{
 +      mlx5e_ptp_close_queues(c);
 +      netif_napi_del(&c->napi);
 +
 +      kvfree(c);
 +}
 +
 +void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c)
 +{
 +      int tc;
 +
 +      for (tc = 0; tc < c->num_tc; tc++)
 +              mlx5e_activate_txqsq(&c->ptpsq[tc].txqsq);
 +}
 +
 +void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c)
 +{
 +      int tc;
 +
 +      for (tc = 0; tc < c->num_tc; tc++)
 +              mlx5e_deactivate_txqsq(&c->ptpsq[tc].txqsq);
 +}
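
For reference, the abort threshold in mlx5e_skb_cb_hwtstamp_tx() above is NSEC_PER_SEC >> 7, i.e. 1/128 of a second:

    /* Illustrative arithmetic only. */
    long threshold = 1000000000L >> 7;      /* 7812500 ns, ~7.8 ms */
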
index 28aa5ae118f45755121413eca221ff916f588ab4,0000000000000000000000000000000000000000..90c98ea63b7fb97ec805e06bb8851ea73d84550a
mode 100644,000000..100644
--- /dev/null
@@@ -1,63 -1,0 +1,62 @@@
-       struct irq_desc *irq_desc;
 +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
 +/* Copyright (c) 2020 Mellanox Technologies. */
 +
 +#ifndef __MLX5_EN_PTP_H__
 +#define __MLX5_EN_PTP_H__
 +
 +#include "en.h"
 +#include "en/params.h"
 +#include "en_stats.h"
 +
 +struct mlx5e_ptpsq {
 +      struct mlx5e_txqsq       txqsq;
 +      struct mlx5e_cq          ts_cq;
 +      u16                      skb_fifo_cc;
 +      u16                      skb_fifo_pc;
 +      struct mlx5e_skb_fifo    skb_fifo;
 +      struct mlx5e_ptp_cq_stats *cq_stats;
 +};
 +
 +struct mlx5e_port_ptp {
 +      /* data path */
 +      struct mlx5e_ptpsq         ptpsq[MLX5E_MAX_NUM_TC];
 +      struct napi_struct         napi;
 +      struct device             *pdev;
 +      struct net_device         *netdev;
 +      __be32                     mkey_be;
 +      u8                         num_tc;
 +      u8                         lag_port;
 +
 +      /* data path - accessed per napi poll */
 +      struct mlx5e_ch_stats     *stats;
 +
 +      /* control */
 +      struct mlx5e_priv         *priv;
 +      struct mlx5_core_dev      *mdev;
 +      struct hwtstamp_config    *tstamp;
 +      DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
 +      int                        ix;
 +};
 +
 +struct mlx5e_ptp_params {
 +      struct mlx5e_params        params;
 +      struct mlx5e_sq_param      txq_sq_param;
 +};
 +
 +int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
 +                      u8 lag_port, struct mlx5e_port_ptp **cp);
 +void mlx5e_port_ptp_close(struct mlx5e_port_ptp *c);
 +void mlx5e_ptp_activate_channel(struct mlx5e_port_ptp *c);
 +void mlx5e_ptp_deactivate_channel(struct mlx5e_port_ptp *c);
 +
 +enum {
 +      MLX5E_SKB_CB_CQE_HWTSTAMP  = BIT(0),
 +      MLX5E_SKB_CB_PORT_HWTSTAMP = BIT(1),
 +};
 +
 +void mlx5e_skb_cb_hwtstamp_handler(struct sk_buff *skb, int hwtstamp_type,
 +                                 ktime_t hwtstamp,
 +                                 struct mlx5e_ptp_cq_stats *cq_stats);
 +
 +void mlx5e_skb_cb_hwtstamp_init(struct sk_buff *skb);
 +#endif /* __MLX5_EN_PTP_H__ */
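
Both completion paths feed mlx5e_skb_cb_hwtstamp_handler(); whichever timestamp arrives second triggers the report. A usage sketch with names from this diff (the CQE-side call sits in the regular TX completion path, which is not shown here and is assumed):

    /* From the regular txqsq CQ completion (assumed call site): */
    mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_CQE_HWTSTAMP,
                                  cqe_hwtstamp, ptpsq->cq_stats);

    /* From the port timestamping CQ, see mlx5e_ptp_handle_ts_cqe(): */
    mlx5e_skb_cb_hwtstamp_handler(skb, MLX5E_SKB_CB_PORT_HWTSTAMP,
                                  port_hwtstamp, ptpsq->cq_stats);
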
index 03831650f6558c7ebae31cd62f796a34aa2a84ab,9bcf73f417d0a0bef87c40d0fe48967fdf194050..7a79d330c0751f6e6be3c88c483326274f84f993
@@@ -64,7 -64,6 +64,7 @@@
  #include "en/hv_vhca_stats.h"
  #include "en/devlink.h"
  #include "lib/mlx5.h"
 +#include "en/ptp.h"
  
  bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
  {
@@@ -413,10 -412,9 +413,10 @@@ static int mlx5e_alloc_rq(struct mlx5e_
        rq->wq_type = params->rq_wq_type;
        rq->pdev    = c->pdev;
        rq->netdev  = c->netdev;
 +      rq->priv    = c->priv;
        rq->tstamp  = c->tstamp;
        rq->clock   = &mdev->clock;
 -      rq->channel = c;
 +      rq->icosq   = &c->icosq;
        rq->ix      = c->ix;
        rq->mdev    = mdev;
        rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        rq_xdp_ix = rq->ix;
        if (xsk)
                rq_xdp_ix += params->num_channels * MLX5E_RQ_GROUP_XSK;
 -      err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix);
 +      err = xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq_xdp_ix, 0);
        if (err < 0)
                goto err_rq_xdp_prog;
  
@@@ -615,11 -613,14 +615,11 @@@ err_rq_xdp_prog
  
  static void mlx5e_free_rq(struct mlx5e_rq *rq)
  {
 -      struct mlx5e_channel *c = rq->channel;
 -      struct bpf_prog *old_prog = NULL;
 +      struct bpf_prog *old_prog;
        int i;
  
 -      /* drop_rq has neither channel nor xdp_prog. */
 -      if (c)
 -              old_prog = rcu_dereference_protected(rq->xdp_prog,
 -                                                   lockdep_is_held(&c->priv->state_lock));
 +      old_prog = rcu_dereference_protected(rq->xdp_prog,
 +                                           lockdep_is_held(&rq->priv->state_lock));
        if (old_prog)
                bpf_prog_put(old_prog);
  
@@@ -719,7 -720,9 +719,7 @@@ int mlx5e_modify_rq_state(struct mlx5e_
  
  static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
  {
 -      struct mlx5e_channel *c = rq->channel;
 -      struct mlx5e_priv *priv = c->priv;
 -      struct mlx5_core_dev *mdev = priv->mdev;
 +      struct mlx5_core_dev *mdev = rq->mdev;
  
        void *in;
        void *rqc;
  
  static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
  {
 -      struct mlx5e_channel *c = rq->channel;
 -      struct mlx5_core_dev *mdev = c->mdev;
 +      struct mlx5_core_dev *mdev = rq->mdev;
        void *in;
        void *rqc;
        int inlen;
@@@ -782,6 -786,7 +782,6 @@@ static void mlx5e_destroy_rq(struct mlx
  int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
  {
        unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
 -      struct mlx5e_channel *c = rq->channel;
  
        u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5e_rqwq_get_size(rq));
  
                msleep(20);
        } while (time_before(jiffies, exp_time));
  
 -      netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
 -                  c->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
 +      netdev_warn(rq->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
 +                  rq->ix, rq->rqn, mlx5e_rqwq_get_cur_sz(rq), min_wqes);
  
        mlx5e_reporter_rx_timeout(rq);
        return -ETIMEDOUT;
@@@ -908,7 -913,7 +908,7 @@@ err_free_rq
  void mlx5e_activate_rq(struct mlx5e_rq *rq)
  {
        set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
 -      mlx5e_trigger_irq(&rq->channel->icosq);
 +      mlx5e_trigger_irq(rq->icosq);
  }
  
  void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
  void mlx5e_close_rq(struct mlx5e_rq *rq)
  {
        cancel_work_sync(&rq->dim.work);
 -      cancel_work_sync(&rq->channel->icosq.recover_work);
 +      cancel_work_sync(&rq->icosq->recover_work);
        cancel_work_sync(&rq->recover_work);
        mlx5e_destroy_rq(rq);
        mlx5e_free_rx_descs(rq);
@@@ -1084,14 -1089,14 +1084,14 @@@ static void mlx5e_free_icosq(struct mlx
        mlx5_wq_destroy(&sq->wq_ctrl);
  }
  
 -static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
 +void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
  {
        kvfree(sq->db.wqe_info);
 -      kvfree(sq->db.skb_fifo);
 +      kvfree(sq->db.skb_fifo.fifo);
        kvfree(sq->db.dma_fifo);
  }
  
 -static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
 +int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
  {
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
        sq->db.dma_fifo = kvzalloc_node(array_size(df_sz,
                                                   sizeof(*sq->db.dma_fifo)),
                                        GFP_KERNEL, numa);
 -      sq->db.skb_fifo = kvzalloc_node(array_size(df_sz,
 -                                                 sizeof(*sq->db.skb_fifo)),
 +      sq->db.skb_fifo.fifo = kvzalloc_node(array_size(df_sz,
 +                                                      sizeof(*sq->db.skb_fifo.fifo)),
                                        GFP_KERNEL, numa);
        sq->db.wqe_info = kvzalloc_node(array_size(wq_sz,
                                                   sizeof(*sq->db.wqe_info)),
                                        GFP_KERNEL, numa);
 -      if (!sq->db.dma_fifo || !sq->db.skb_fifo || !sq->db.wqe_info) {
 +      if (!sq->db.dma_fifo || !sq->db.skb_fifo.fifo || !sq->db.wqe_info) {
                mlx5e_free_txqsq_db(sq);
                return -ENOMEM;
        }
  
        sq->dma_fifo_mask = df_sz - 1;
 -      sq->skb_fifo_mask = df_sz - 1;
 -
 -      return 0;
 -}
 -
 -static int mlx5e_calc_sq_stop_room(struct mlx5e_txqsq *sq, u8 log_sq_size)
 -{
 -      int sq_size = 1 << log_sq_size;
  
 -      sq->stop_room  = mlx5e_tls_get_stop_room(sq);
 -      sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
 -      if (test_bit(MLX5E_SQ_STATE_MPWQE, &sq->state))
 -              /* A MPWQE can take up to the maximum-sized WQE + all the normal
 -               * stop room can be taken if a new packet breaks the active
 -               * MPWQE session and allocates its WQEs right away.
 -               */
 -              sq->stop_room += mlx5e_stop_room_for_wqe(MLX5_SEND_WQE_MAX_WQEBBS);
 -
 -      if (WARN_ON(sq->stop_room >= sq_size)) {
 -              netdev_err(sq->channel->netdev, "Stop room %hu is bigger than the SQ size %d\n",
 -                         sq->stop_room, sq_size);
 -              return -ENOSPC;
 -      }
 +      sq->db.skb_fifo.pc   = &sq->skb_fifo_pc;
 +      sq->db.skb_fifo.cc   = &sq->skb_fifo_cc;
 +      sq->db.skb_fifo.mask = df_sz - 1;
  
        return 0;
  }
  
 -static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work);
  static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                             int txq_ix,
                             struct mlx5e_params *params,
        sq->tstamp    = c->tstamp;
        sq->clock     = &mdev->clock;
        sq->mkey_be   = c->mkey_be;
 -      sq->channel   = c;
 +      sq->netdev    = c->netdev;
 +      sq->mdev      = c->mdev;
 +      sq->priv      = c->priv;
        sq->ch_ix     = c->ix;
        sq->txq_ix    = txq_ix;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
                set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
        if (param->is_mpw)
                set_bit(MLX5E_SQ_STATE_MPWQE, &sq->state);
 -      err = mlx5e_calc_sq_stop_room(sq, params->log_sq_size);
 -      if (err)
 -              return err;
 +      sq->stop_room = param->stop_room;
  
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
@@@ -1176,12 -1201,20 +1176,12 @@@ err_sq_wq_destroy
        return err;
  }
  
 -static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
 +void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
  {
        mlx5e_free_txqsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
  }
  
 -struct mlx5e_create_sq_param {
 -      struct mlx5_wq_ctrl        *wq_ctrl;
 -      u32                         cqn;
 -      u32                         tisn;
 -      u8                          tis_lst_sz;
 -      u8                          min_inline_mode;
 -};
 -
  static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_create_sq_param *csp,
        MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
        MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
        MLX5_SET(sqc,  sqc, cqn, csp->cqn);
 +      MLX5_SET(sqc,  sqc, ts_cqe_to_dest_cqn, csp->ts_cqe_to_dest_cqn);
  
        if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
                MLX5_SET(sqc,  sqc, min_wqe_inline_mode, csp->min_inline_mode);
@@@ -1264,10 -1296,10 +1264,10 @@@ static void mlx5e_destroy_sq(struct mlx
        mlx5_core_destroy_sq(mdev, sqn);
  }
  
 -static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
 -                             struct mlx5e_sq_param *param,
 -                             struct mlx5e_create_sq_param *csp,
 -                             u32 *sqn)
 +int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
 +                      struct mlx5e_sq_param *param,
 +                      struct mlx5e_create_sq_param *csp,
 +                      u32 *sqn)
  {
        struct mlx5e_modify_sq_param msp = {0};
        int err;
@@@ -1330,7 -1362,7 +1330,7 @@@ err_free_txqsq
  
  void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
  {
 -      sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
 +      sq->txq = netdev_get_tx_queue(sq->netdev, sq->txq_ix);
        set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        netdev_tx_reset_queue(sq->txq);
        netif_tx_start_queue(sq->txq);
@@@ -1343,7 -1375,7 +1343,7 @@@ void mlx5e_tx_disable_queue(struct netd
        __netif_tx_unlock_bh(txq);
  }
  
 -static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 +void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
  {
        struct mlx5_wq_cyc *wq = &sq->wq;
  
  
  static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
  {
 -      struct mlx5e_channel *c = sq->channel;
 -      struct mlx5_core_dev *mdev = c->mdev;
 +      struct mlx5_core_dev *mdev = sq->mdev;
        struct mlx5_rate_limit rl = {0};
  
        cancel_work_sync(&sq->dim.work);
        mlx5e_free_txqsq(sq);
  }
  
 -static void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
 +void mlx5e_tx_err_cqe_work(struct work_struct *recover_work)
  {
        struct mlx5e_txqsq *sq = container_of(recover_work, struct mlx5e_txqsq,
                                              recover_work);
@@@ -1509,11 -1542,10 +1509,11 @@@ void mlx5e_close_xdpsq(struct mlx5e_xdp
        mlx5e_free_xdpsq(sq);
  }
  
 -static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
 +static int mlx5e_alloc_cq_common(struct mlx5e_priv *priv,
                                 struct mlx5e_cq_param *param,
                                 struct mlx5e_cq *cq)
  {
 +      struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        unsigned int irqn;
        }
  
        cq->mdev = mdev;
 +      cq->netdev = priv->netdev;
 +      cq->priv = priv;
  
        return 0;
  }
  
 -static int mlx5e_alloc_cq(struct mlx5e_channel *c,
 +static int mlx5e_alloc_cq(struct mlx5e_priv *priv,
                          struct mlx5e_cq_param *param,
 +                        struct mlx5e_create_cq_param *ccp,
                          struct mlx5e_cq *cq)
  {
 -      struct mlx5_core_dev *mdev = c->priv->mdev;
        int err;
  
 -      param->wq.buf_numa_node = cpu_to_node(c->cpu);
 -      param->wq.db_numa_node  = cpu_to_node(c->cpu);
 -      param->eq_ix   = c->ix;
 +      param->wq.buf_numa_node = ccp->node;
 +      param->wq.db_numa_node  = ccp->node;
 +      param->eq_ix            = ccp->ix;
  
 -      err = mlx5e_alloc_cq_common(mdev, param, cq);
 +      err = mlx5e_alloc_cq_common(priv, param, cq);
  
 -      cq->napi    = &c->napi;
 -      cq->channel = c;
 +      cq->napi     = ccp->napi;
 +      cq->ch_stats = ccp->ch_stats;
  
        return err;
  }
@@@ -1630,14 -1660,13 +1630,14 @@@ static void mlx5e_destroy_cq(struct mlx
        mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
  }
  
 -int mlx5e_open_cq(struct mlx5e_channel *c, struct dim_cq_moder moder,
 -                struct mlx5e_cq_param *param, struct mlx5e_cq *cq)
 +int mlx5e_open_cq(struct mlx5e_priv *priv, struct dim_cq_moder moder,
 +                struct mlx5e_cq_param *param, struct mlx5e_create_cq_param *ccp,
 +                struct mlx5e_cq *cq)
  {
 -      struct mlx5_core_dev *mdev = c->mdev;
 +      struct mlx5_core_dev *mdev = priv->mdev;
        int err;
  
 -      err = mlx5e_alloc_cq(c, param, cq);
 +      err = mlx5e_alloc_cq(priv, param, ccp, cq);
        if (err)
                return err;
  
@@@ -1663,15 -1692,14 +1663,15 @@@ void mlx5e_close_cq(struct mlx5e_cq *cq
  
  static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
 +                           struct mlx5e_create_cq_param *ccp,
                             struct mlx5e_channel_param *cparam)
  {
        int err;
        int tc;
  
        for (tc = 0; tc < c->num_tc; tc++) {
 -              err = mlx5e_open_cq(c, params->tx_cq_moderation,
 -                                  &cparam->txq_sq.cqp, &c->sq[tc].cq);
 +              err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->txq_sq.cqp,
 +                                  ccp, &c->sq[tc].cq);
                if (err)
                        goto err_close_tx_cqs;
        }
@@@ -1806,52 -1834,35 +1806,52 @@@ static int mlx5e_set_tx_maxrate(struct 
        return err;
  }
  
 +void mlx5e_build_create_cq_param(struct mlx5e_create_cq_param *ccp, struct mlx5e_channel *c)
 +{
 +      *ccp = (struct mlx5e_create_cq_param) {
 +              .napi = &c->napi,
 +              .ch_stats = c->stats,
 +              .node = cpu_to_node(c->cpu),
 +              .ix = c->ix,
 +      };
 +}
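
Decoupling mlx5e_open_cq() from struct mlx5e_channel means any queue owner can fill the four creation parameters itself; the PTP port channel earlier in this diff does exactly that:

    /* As used by mlx5e_ptp_open_cqs() above: */
    struct mlx5e_create_cq_param ccp = {};

    ccp.node     = dev_to_node(mlx5_core_dma_dev(c->mdev));
    ccp.ch_stats = c->stats;
    ccp.napi     = &c->napi;
    ccp.ix       = c->ix;

    err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq);
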
 +
  static int mlx5e_open_queues(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_channel_param *cparam)
  {
        struct dim_cq_moder icocq_moder = {0, 0};
 +      struct mlx5e_create_cq_param ccp;
        int err;
  
 -      err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq.cqp, &c->async_icosq.cq);
 +      mlx5e_build_create_cq_param(&ccp, c);
 +
 +      err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->icosq.cqp, &ccp,
 +                          &c->async_icosq.cq);
        if (err)
                return err;
  
 -      err = mlx5e_open_cq(c, icocq_moder, &cparam->async_icosq.cqp, &c->icosq.cq);
 +      err = mlx5e_open_cq(c->priv, icocq_moder, &cparam->async_icosq.cqp, &ccp,
 +                          &c->icosq.cq);
        if (err)
                goto err_close_async_icosq_cq;
  
 -      err = mlx5e_open_tx_cqs(c, params, cparam);
 +      err = mlx5e_open_tx_cqs(c, params, &ccp, cparam);
        if (err)
                goto err_close_icosq_cq;
  
 -      err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &c->xdpsq.cq);
 +      err = mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp, &ccp,
 +                          &c->xdpsq.cq);
        if (err)
                goto err_close_tx_cqs;
  
 -      err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rq.cqp, &c->rq.cq);
 +      err = mlx5e_open_cq(c->priv, params->rx_cq_moderation, &cparam->rq.cqp, &ccp,
 +                          &c->rq.cq);
        if (err)
                goto err_close_xdp_tx_cqs;
  
 -      err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
 -                                   &cparam->xdp_sq.cqp, &c->rq_xdpsq.cq) : 0;
 +      err = c->xdp ? mlx5e_open_cq(c->priv, params->tx_cq_moderation, &cparam->xdp_sq.cqp,
 +                                   &ccp, &c->rq_xdpsq.cq) : 0;
        if (err)
                goto err_close_rx_cq;
  
@@@ -1987,7 -1998,7 +1987,7 @@@ static int mlx5e_open_channel(struct ml
        c->num_tc   = params->num_tc;
        c->xdp      = !!params->xdp_prog;
        c->stats    = &priv->channel_stats[ix].ch;
-       c->irq_desc = irq_to_desc(irq);
+       c->aff_mask = irq_get_effective_affinity_mask(irq);
        c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);
  
        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@@ -2214,7 -2225,6 +2214,7 @@@ static void mlx5e_build_sq_param(struc
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        MLX5_SET(sqc, sqc, allow_swp, allow_swp);
        param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE);
 +      param->stop_room = mlx5e_calc_sq_stop_room(priv->mdev, params);
        mlx5e_build_tx_cq_param(priv, params, &param->cqp);
  }
  
@@@ -2374,13 -2384,6 +2374,13 @@@ int mlx5e_open_channels(struct mlx5e_pr
                        goto err_close_channels;
        }
  
 +      if (MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS)) {
 +              err = mlx5e_port_ptp_open(priv, &chs->params, chs->c[0]->lag_port,
 +                                        &chs->port_ptp);
 +              if (err)
 +                      goto err_close_channels;
 +      }
 +
        mlx5e_health_channels_update(priv);
        kvfree(cparam);
        return 0;
@@@ -2402,9 -2405,6 +2402,9 @@@ static void mlx5e_activate_channels(str
  
        for (i = 0; i < chs->num; i++)
                mlx5e_activate_channel(chs->c[i]);
 +
 +      if (chs->port_ptp)
 +              mlx5e_ptp_activate_channel(chs->port_ptp);
  }
  
  #define MLX5E_RQ_WQES_TIMEOUT 20000 /* msecs */
@@@ -2431,9 -2431,6 +2431,9 @@@ static void mlx5e_deactivate_channels(s
  {
        int i;
  
 +      if (chs->port_ptp)
 +              mlx5e_ptp_deactivate_channel(chs->port_ptp);
 +
        for (i = 0; i < chs->num; i++)
                mlx5e_deactivate_channel(chs->c[i]);
  }
@@@ -2442,9 -2439,6 +2442,9 @@@ void mlx5e_close_channels(struct mlx5e_
  {
        int i;
  
 +      if (chs->port_ptp)
 +              mlx5e_port_ptp_close(chs->port_ptp);
 +
        for (i = 0; i < chs->num; i++)
                mlx5e_close_channel(chs->c[i]);
  
@@@ -2930,8 -2924,6 +2930,8 @@@ static int mlx5e_update_netdev_queues(s
        nch = priv->channels.params.num_channels;
        ntc = priv->channels.params.num_tc;
        num_txqs = nch * ntc;
 +      if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
 +              num_txqs += ntc;
        num_rxqs = nch * priv->profile->rq_groups;
  
        mlx5e_netdev_set_tcs(netdev, nch, ntc);
@@@ -3005,13 -2997,14 +3005,13 @@@ MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(ml
  
  static void mlx5e_build_txq_maps(struct mlx5e_priv *priv)
  {
 -      int i, ch;
 +      int i, ch, tc, num_tc;
  
        ch = priv->channels.num;
 +      num_tc = priv->channels.params.num_tc;
  
        for (i = 0; i < ch; i++) {
 -              int tc;
 -
 -              for (tc = 0; tc < priv->channels.params.num_tc; tc++) {
 +              for (tc = 0; tc < num_tc; tc++) {
                        struct mlx5e_channel *c = priv->channels.c[i];
                        struct mlx5e_txqsq *sq = &c->sq[tc];
  
                        priv->channel_tc2realtxq[i][tc] = i + tc * ch;
                }
        }
 +
 +      if (!priv->channels.port_ptp)
 +              return;
 +
 +      for (tc = 0; tc < num_tc; tc++) {
 +              struct mlx5e_port_ptp *c = priv->channels.port_ptp;
 +              struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq;
 +
 +              priv->txq2sq[sq->txq_ix] = sq;
 +              priv->port_ptp_tc2realtxq[tc] = priv->num_tc_x_num_ch + tc;
 +      }
 +}
 +
 +static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv)
 +{
 +      /* Sync with mlx5e_select_queue. */
 +      WRITE_ONCE(priv->num_tc_x_num_ch,
 +                 priv->channels.params.num_tc * priv->channels.num);
  }
  
  void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
  {
 +      mlx5e_update_num_tc_x_num_ch(priv);
        mlx5e_build_txq_maps(priv);
        mlx5e_activate_channels(&priv->channels);
        mlx5e_xdp_tx_enable(priv);
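
The new mlx5e_update_num_tc_x_num_ch() publishes the txq count with
WRITE_ONCE() because, per its comment, mlx5e_select_queue() reads it
locklessly on the transmit hot path. A sketch of the matching reader
side; the clamp below is illustrative only, not the driver's actual
queue-selection logic:

	/* Reader: pairs with the WRITE_ONCE() above so the hot path sees
	 * one consistent value without taking priv->state_lock. */
	int num_tc_x_num_ch = READ_ONCE(priv->num_tc_x_num_ch);

	if (txq_ix >= num_tc_x_num_ch)	/* PTP txqs sit past this range */
		txq_ix %= num_tc_x_num_ch;	/* illustrative clamp only */
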
@@@ -3161,7 -3135,7 +3161,7 @@@ static void mlx5e_modify_admin_state(st
  
        mlx5_set_port_admin_status(mdev, state);
  
 -      if (!MLX5_ESWITCH_MANAGER(mdev) ||  mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
 +      if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
                return;
  
        if (state == MLX5_PORT_UP)
@@@ -3245,11 -3219,6 +3245,11 @@@ int mlx5e_close(struct net_device *netd
        return err;
  }
  
 +static void mlx5e_free_drop_rq(struct mlx5e_rq *rq)
 +{
 +      mlx5_wq_destroy(&rq->wq_ctrl);
 +}
 +
  static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
                               struct mlx5e_rq *rq,
                               struct mlx5e_rq_param *param)
        return 0;
  }
  
 -static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
 +static int mlx5e_alloc_drop_cq(struct mlx5e_priv *priv,
                               struct mlx5e_cq *cq,
                               struct mlx5e_cq_param *param)
  {
 +      struct mlx5_core_dev *mdev = priv->mdev;
 +
        param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
        param->wq.db_numa_node  = dev_to_node(mlx5_core_dma_dev(mdev));
  
 -      return mlx5e_alloc_cq_common(mdev, param, cq);
 +      return mlx5e_alloc_cq_common(priv, param, cq);
  }
  
  int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
  
        mlx5e_build_drop_rq_param(priv, &rq_param);
  
 -      err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
 +      err = mlx5e_alloc_drop_cq(priv, cq, &cq_param);
        if (err)
                return err;
  
        return 0;
  
  err_free_rq:
 -      mlx5e_free_rq(drop_rq);
 +      mlx5e_free_drop_rq(drop_rq);
  
  err_destroy_cq:
        mlx5e_destroy_cq(cq);
@@@ -3333,7 -3300,7 +3333,7 @@@ err_free_cq
  void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
  {
        mlx5e_destroy_rq(drop_rq);
 -      mlx5e_free_rq(drop_rq);
 +      mlx5e_free_drop_rq(drop_rq);
        mlx5e_destroy_cq(&drop_rq->cq);
        mlx5e_free_cq(&drop_rq->cq);
  }
@@@ -4032,9 -3999,6 +4032,9 @@@ int mlx5e_change_mtu(struct net_device 
  
        new_channels.params = *params;
        new_channels.params.sw_mtu = new_mtu;
 +      err = mlx5e_validate_params(priv, &new_channels.params);
 +      if (err)
 +              goto out;
  
        if (params->xdp_prog &&
            !mlx5e_rx_is_linear_skb(&new_channels.params, NULL)) {
@@@ -4287,20 -4251,6 +4287,20 @@@ int mlx5e_get_vf_stats(struct net_devic
  }
  #endif
  
 +static bool mlx5e_tunnel_proto_supported_tx(struct mlx5_core_dev *mdev, u8 proto_type)
 +{
 +      switch (proto_type) {
 +      case IPPROTO_GRE:
 +              return MLX5_CAP_ETH(mdev, tunnel_stateless_gre);
 +      case IPPROTO_IPIP:
 +      case IPPROTO_IPV6:
 +              return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) ||
 +                      MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_tx));
 +      default:
 +              return false;
 +      }
 +}
 +
  static bool mlx5e_gre_tunnel_inner_proto_offload_supported(struct mlx5_core_dev *mdev,
                                                           struct sk_buff *skb)
  {
@@@ -4343,7 -4293,7 +4343,7 @@@ static netdev_features_t mlx5e_tunnel_f
                break;
        case IPPROTO_IPIP:
        case IPPROTO_IPV6:
 -              if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
 +              if (mlx5e_tunnel_proto_supported_tx(priv->mdev, IPPROTO_IPIP))
                        return features;
                break;
        case IPPROTO_UDP:
@@@ -4392,7 -4342,6 +4392,7 @@@ static void mlx5e_tx_timeout_work(struc
  {
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               tx_timeout_work);
 +      struct net_device *netdev = priv->netdev;
        int i;
  
        rtnl_lock();
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                goto unlock;
  
 -      for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
 +      for (i = 0; i < netdev->real_num_tx_queues; i++) {
                struct netdev_queue *dev_queue =
 -                      netdev_get_tx_queue(priv->netdev, i);
 +                      netdev_get_tx_queue(netdev, i);
                struct mlx5e_txqsq *sq = priv->txq2sq[i];
  
                if (!netif_xmit_stopped(dev_queue))
@@@ -4463,7 -4412,7 +4463,7 @@@ static void mlx5e_rq_replace_xdp_prog(s
        struct bpf_prog *old_prog;
  
        old_prog = rcu_replace_pointer(rq->xdp_prog, prog,
 -                                     lockdep_is_held(&rq->channel->priv->state_lock));
 +                                     lockdep_is_held(&rq->priv->state_lock));
        if (old_prog)
                bpf_prog_put(old_prog);
  }
@@@ -4648,6 -4597,31 +4648,6 @@@ const struct net_device_ops mlx5e_netde
        .ndo_get_devlink_port    = mlx5e_get_devlink_port,
  };
  
 -static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 -{
 -      if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
 -              return -EOPNOTSUPP;
 -      if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
 -          !MLX5_CAP_GEN(mdev, nic_flow_table) ||
 -          !MLX5_CAP_ETH(mdev, csum_cap) ||
 -          !MLX5_CAP_ETH(mdev, max_lso_cap) ||
 -          !MLX5_CAP_ETH(mdev, vlan_cap) ||
 -          !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
 -          MLX5_CAP_FLOWTABLE(mdev,
 -                             flow_table_properties_nic_receive.max_ft_level)
 -                             < 3) {
 -              mlx5_core_warn(mdev,
 -                             "Not creating net device, some required device capabilities are missing\n");
 -              return -EOPNOTSUPP;
 -      }
 -      if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
 -              mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
 -      if (!MLX5_CAP_GEN(mdev, cq_moderation))
 -              mlx5_core_warn(mdev, "CQ moderation is not supported\n");
 -
 -      return 0;
 -}
 -
  void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
                                   int num_channels)
  {
@@@ -4903,17 -4877,6 +4903,17 @@@ void mlx5e_vxlan_set_netdev_info(struc
        priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
  }
  
 +static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev)
 +{
 +      int tt;
 +
 +      for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
 +              if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt)))
 +                      return true;
 +      }
 +      return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev));
 +}
 +
  static void mlx5e_build_nic_netdev(struct net_device *netdev)
  {
        struct mlx5e_priv *priv = netdev_priv(netdev);
  
        mlx5e_vxlan_set_netdev_info(priv);
  
 -      if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
 -          mlx5e_any_tunnel_proto_supported(mdev)) {
 +      if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
                netdev->hw_enc_features |= NETIF_F_HW_CSUM;
                netdev->hw_enc_features |= NETIF_F_TSO;
                netdev->hw_enc_features |= NETIF_F_TSO6;
                                         NETIF_F_GSO_UDP_TUNNEL_CSUM;
        }
  
 -      if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_GRE)) {
 +      if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_GRE)) {
                netdev->hw_features     |= NETIF_F_GSO_GRE |
                                           NETIF_F_GSO_GRE_CSUM;
                netdev->hw_enc_features |= NETIF_F_GSO_GRE |
                                                NETIF_F_GSO_GRE_CSUM;
        }
  
 -      if (mlx5e_tunnel_proto_supported(mdev, IPPROTO_IPIP)) {
 +      if (mlx5e_tunnel_proto_supported_tx(mdev, IPPROTO_IPIP)) {
                netdev->hw_features |= NETIF_F_GSO_IPXIP4 |
                                       NETIF_F_GSO_IPXIP6;
                netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4 |
@@@ -5370,14 -5334,10 +5370,14 @@@ struct net_device *mlx5e_create_netdev(
                                       void *ppriv)
  {
        struct net_device *netdev;
 +      unsigned int ptp_txqs = 0;
        int err;
  
 +      if (MLX5_CAP_GEN(mdev, ts_cqe_to_dest_cqn))
 +              ptp_txqs = profile->max_tc;
 +
        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
 -                                  nch * profile->max_tc,
 +                                  nch * profile->max_tc + ptp_txqs,
                                    nch * profile->rq_groups);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
@@@ -5481,12 -5441,13 +5481,12 @@@ void mlx5e_destroy_netdev(struct mlx5e_
        free_netdev(netdev);
  }
  
 -/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying
 - * hardware contexts and to connect it to the current netdev.
 - */
 -static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
 +static int mlx5e_resume(struct auxiliary_device *adev)
  {
 -      struct mlx5e_priv *priv = vpriv;
 +      struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
 +      struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
        struct net_device *netdev = priv->netdev;
 +      struct mlx5_core_dev *mdev = edev->mdev;
        int err;
  
        if (netif_device_present(netdev))
        return 0;
  }
  
 -static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
 +static int mlx5e_suspend(struct auxiliary_device *adev, pm_message_t state)
  {
 -      struct mlx5e_priv *priv = vpriv;
 +      struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
        struct net_device *netdev = priv->netdev;
 -
 -#ifdef CONFIG_MLX5_ESWITCH
 -      if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev)
 -              return;
 -#endif
 +      struct mlx5_core_dev *mdev = priv->mdev;
  
        if (!netif_device_present(netdev))
 -              return;
 +              return -ENODEV;
  
        mlx5e_detach_netdev(priv);
        mlx5e_destroy_mdev_resources(mdev);
 +      return 0;
  }
  
 -static void *mlx5e_add(struct mlx5_core_dev *mdev)
 +static int mlx5e_probe(struct auxiliary_device *adev,
 +                     const struct auxiliary_device_id *id)
  {
 +      struct mlx5_adev *edev = container_of(adev, struct mlx5_adev, adev);
 +      struct mlx5_core_dev *mdev = edev->mdev;
        struct net_device *netdev;
 +      pm_message_t state = {};
        void *priv;
        int err;
        int nch;
  
 -      err = mlx5e_check_required_hca_cap(mdev);
 -      if (err)
 -              return NULL;
 -
 -#ifdef CONFIG_MLX5_ESWITCH
 -      if (MLX5_ESWITCH_MANAGER(mdev) &&
 -          mlx5_eswitch_mode(mdev->priv.eswitch) == MLX5_ESWITCH_OFFLOADS) {
 -              mlx5e_rep_register_vport_reps(mdev);
 -              return mdev;
 -      }
 -#endif
 -
        nch = mlx5e_get_max_num_channels(mdev);
        netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL);
        if (!netdev) {
                mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
 -              return NULL;
 +              return -ENOMEM;
        }
  
        dev_net_set(netdev, mlx5_core_net(mdev));
        priv = netdev_priv(netdev);
 +      dev_set_drvdata(&adev->dev, priv);
  
 -      err = mlx5e_attach(mdev, priv);
 +      err = mlx5e_resume(adev);
        if (err) {
 -              mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
 +              mlx5_core_err(mdev, "mlx5e_resume failed, %d\n", err);
                goto err_destroy_netdev;
        }
  
        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
 -              goto err_detach;
 +              goto err_resume;
        }
  
        mlx5e_devlink_port_type_eth_set(priv);
  
        mlx5e_dcbnl_init_app(priv);
 -      return priv;
 +      return 0;
  
 -err_detach:
 -      mlx5e_detach(mdev, priv);
 +err_resume:
 +      mlx5e_suspend(adev, state);
  err_destroy_netdev:
        mlx5e_destroy_netdev(priv);
 -      return NULL;
 +      return err;
  }
  
 -static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
 +static void mlx5e_remove(struct auxiliary_device *adev)
  {
 -      struct mlx5e_priv *priv;
 +      struct mlx5e_priv *priv = dev_get_drvdata(&adev->dev);
 +      pm_message_t state = {};
  
 -#ifdef CONFIG_MLX5_ESWITCH
 -      if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) {
 -              mlx5e_rep_unregister_vport_reps(mdev);
 -              return;
 -      }
 -#endif
 -      priv = vpriv;
        mlx5e_dcbnl_delete_app(priv);
        unregister_netdev(priv->netdev);
 -      mlx5e_detach(mdev, vpriv);
 +      mlx5e_suspend(adev, state);
        mlx5e_destroy_netdev(priv);
  }
  
 -static struct mlx5_interface mlx5e_interface = {
 -      .add       = mlx5e_add,
 -      .remove    = mlx5e_remove,
 -      .attach    = mlx5e_attach,
 -      .detach    = mlx5e_detach,
 -      .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
 +static const struct auxiliary_device_id mlx5e_id_table[] = {
 +      { .name = MLX5_ADEV_NAME ".eth", },
 +      {},
 +};
 +
 +MODULE_DEVICE_TABLE(auxiliary, mlx5e_id_table);
 +
 +static struct auxiliary_driver mlx5e_driver = {
 +      .name = "eth",
 +      .probe = mlx5e_probe,
 +      .remove = mlx5e_remove,
 +      .suspend = mlx5e_suspend,
 +      .resume = mlx5e_resume,
 +      .id_table = mlx5e_id_table,
  };
  
 -void mlx5e_init(void)
 +int mlx5e_init(void)
  {
 +      int ret;
 +
        mlx5e_ipsec_build_inverse_table();
        mlx5e_build_ptys2ethtool_map();
 -      mlx5_register_interface(&mlx5e_interface);
 +      ret = mlx5e_rep_init();
 +      if (ret)
 +              return ret;
 +
 +      ret = auxiliary_driver_register(&mlx5e_driver);
 +      if (ret)
 +              mlx5e_rep_cleanup();
 +      return ret;
  }
  
  void mlx5e_cleanup(void)
  {
 -      mlx5_unregister_interface(&mlx5e_interface);
 +      auxiliary_driver_unregister(&mlx5e_driver);
 +      mlx5e_rep_cleanup();
  }
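
The tail of the en_main.c diff converts mlx5e from the private
mlx5_interface add/remove/attach/detach callbacks to the generic
auxiliary bus: the core driver exposes an auxiliary_device named
"<MLX5_ADEV_NAME>.eth", and the ethernet part binds to it like any other
bus driver. A self-contained skeleton of that shape — every "demo"
identifier is illustrative; only the auxiliary-bus API itself is real:

	#include <linux/auxiliary_bus.h>
	#include <linux/module.h>

	static int demo_probe(struct auxiliary_device *adev,
			      const struct auxiliary_device_id *id)
	{
		/* Parent-driver state travels via container_of() on adev,
		 * as mlx5e_probe() does with struct mlx5_adev. */
		dev_info(&adev->dev, "bound to %s\n", id->name);
		return 0;
	}

	static void demo_remove(struct auxiliary_device *adev)
	{
		dev_info(&adev->dev, "unbound\n");
	}

	/* Matches devices the parent registered as "<parent modname>.eth". */
	static const struct auxiliary_device_id demo_id_table[] = {
		{ .name = "demo_core.eth", },
		{}
	};
	MODULE_DEVICE_TABLE(auxiliary, demo_id_table);

	static struct auxiliary_driver demo_driver = {
		.name = "eth",
		.probe = demo_probe,
		.remove = demo_remove,
		.id_table = demo_id_table,
	};
	module_auxiliary_driver(demo_driver);
	MODULE_LICENSE("GPL");
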
index 1ec3d62f026da95fd211a38b3bd49a984ea9301b,793e313dcb8b03485b5c8f30e0a78fa3513ce1c0..a3cfe06d511691206a76e7cc6c8eb8a31a81c923
  static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
  {
        int current_cpu = smp_processor_id();
-       const struct cpumask *aff;
-       struct irq_data *idata;
  
-       idata = irq_desc_get_irq_data(c->irq_desc);
-       aff = irq_data_get_affinity_mask(idata);
-       return cpumask_test_cpu(current_cpu, aff);
+       return cpumask_test_cpu(current_cpu, c->aff_mask);
  }
  
  static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
@@@ -221,13 -217,14 +217,13 @@@ void mlx5e_completion_event(struct mlx5
  
        napi_schedule(cq->napi);
        cq->event_ctr++;
 -      cq->channel->stats->events++;
 +      cq->ch_stats->events++;
  }
  
  void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
  {
        struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
 -      struct mlx5e_channel *c = cq->channel;
 -      struct net_device *netdev = c->netdev;
 +      struct net_device *netdev = cq->netdev;
  
        netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
                   __func__, mcq->cqn, event);
index 7f29c2fdcd51c1355a2e0968620adf510c187550,22135df79d83b2f3751ebe282041b7251825f7dc..07e36661bbc29f312d65eff5e7420719fcd41d0d
@@@ -18,7 -18,6 +18,7 @@@
  #include <linux/of_platform.h>
  #include <linux/of_irq.h>
  #include <linux/pci.h>
 +#include <linux/pci-ecam.h>
  #include <linux/platform_device.h>
  #include <linux/irqchip/chained_irq.h>
  
  #define E_ECAM_CR_ENABLE              BIT(0)
  #define E_ECAM_SIZE_LOC                       GENMASK(20, 16)
  #define E_ECAM_SIZE_SHIFT             16
 -#define ECAM_BUS_LOC_SHIFT            20
 -#define ECAM_DEV_LOC_SHIFT            12
  #define NWL_ECAM_VALUE_DEFAULT                12
  
  #define CFG_DMA_REG_BAR                       GENMASK(2, 0)
@@@ -239,11 -240,15 +239,11 @@@ static void __iomem *nwl_pcie_map_bus(s
                                      int where)
  {
        struct nwl_pcie *pcie = bus->sysdata;
 -      int relbus;
  
        if (!nwl_pcie_valid_device(bus, devfn))
                return NULL;
  
 -      relbus = (bus->number << ECAM_BUS_LOC_SHIFT) |
 -                      (devfn << ECAM_DEV_LOC_SHIFT);
 -
 -      return pcie->ecam_base + relbus + where;
 +      return pcie->ecam_base + PCIE_ECAM_OFFSET(bus->number, devfn, where);
  }
  
  /* PCIe operations */
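
The map_bus hunk above drops the driver's private ECAM shift macros in
favour of PCIE_ECAM_OFFSET() from the newly included <linux/pci-ecam.h>,
which encodes the standard layout bus << 20 | devfn << 12 | reg. A
worked example of the arithmetic (the bus, devfn and register values are
arbitrary):

	/* Bus 1, device 1, function 0, config register 0x10:
	 *   devfn = PCI_DEVFN(1, 0) = 0x08
	 *   PCIE_ECAM_OFFSET(1, 0x08, 0x10)
	 *     = (1 << 20) | (0x08 << 12) | 0x10
	 *     = 0x108010
	 */
	void __iomem *cfg = pcie->ecam_base +
			    PCIE_ECAM_OFFSET(1, PCI_DEVFN(1, 0), 0x10);
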
@@@ -374,13 -379,11 +374,11 @@@ static void nwl_pcie_msi_handler_low(st
  
  static void nwl_mask_leg_irq(struct irq_data *data)
  {
-       struct irq_desc *desc = irq_to_desc(data->irq);
-       struct nwl_pcie *pcie;
+       struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 mask;
        u32 val;
  
-       pcie = irq_desc_get_chip_data(desc);
        mask = 1 << (data->hwirq - 1);
        raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
        val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
  
  static void nwl_unmask_leg_irq(struct irq_data *data)
  {
-       struct irq_desc *desc = irq_to_desc(data->irq);
-       struct nwl_pcie *pcie;
+       struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 mask;
        u32 val;
  
-       pcie = irq_desc_get_chip_data(desc);
        mask = 1 << (data->hwirq - 1);
        raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
        val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
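
Both legacy-IRQ callbacks above now take the controller pointer straight
from the irq_data they are handed, via irq_data_get_irq_chip_data(),
rather than detouring through irq_to_desc() and irq_desc_get_chip_data();
the pointer is typically installed at mapping time with
irq_set_chip_data(). A minimal sketch of the pattern — the "demo" names
and the 0x10 register offset are illustrative, not from this driver:

	#include <linux/bits.h>
	#include <linux/io.h>
	#include <linux/irq.h>

	struct demo_port {
		void __iomem *base;	/* MMIO window */
	};

	static void demo_mask_irq(struct irq_data *data)
	{
		struct demo_port *port = irq_data_get_irq_chip_data(data);

		/* Mask only this hwirq; the chip-data pointer was set by
		 * the irq domain's map hook with irq_set_chip_data(). */
		writel(BIT(data->hwirq), port->base + 0x10);
	}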