Git Repo - linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author Jakub Kicinski <[email protected]>
Tue, 14 May 2024 17:52:25 +0000 (10:52 -0700)
committer Jakub Kicinski <[email protected]>
Tue, 14 May 2024 17:53:19 +0000 (10:53 -0700)
Merge in late fixes to prepare for the 6.10 net-next PR.

Signed-off-by: Jakub Kicinski <[email protected]>
17 files changed:
drivers/net/ethernet/cortina/gemini.c
drivers/net/ethernet/intel/ice/ice_ddp.c
drivers/net/ethernet/mediatek/mtk_eth_soc.c
drivers/net/ethernet/mediatek/mtk_eth_soc.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
drivers/net/ethernet/sun/sungem.c
drivers/net/phy/micrel.c
drivers/net/usb/ax88179_178a.c
include/linux/mlx5/driver.h
include/net/ax25.h
kernel/bpf/syscall.c
net/ax25/ax25_dev.c
net/ipv4/devinet.c
net/unix/af_unix.c

index 2f98f644b9d7b5e48c4983dd2450a8c10fe04008,d1fbadbf86d4a23ad978f5090bb5018af6a155a6..5f0c9e1771dbfe12a15fc4983462d63cbf6259db
@@@ -1107,10 -1107,13 +1107,13 @@@ static void gmac_tx_irq_enable(struct n
  {
        struct gemini_ethernet_port *port = netdev_priv(netdev);
        struct gemini_ethernet *geth = port->geth;
+       unsigned long flags;
        u32 val, mask;
  
        netdev_dbg(netdev, "%s device %d\n", __func__, netdev->dev_id);
  
+       spin_lock_irqsave(&geth->irq_lock, flags);
        mask = GMAC0_IRQ0_TXQ0_INTS << (6 * netdev->dev_id + txq);
  
        if (en)
        val = readl(geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
        val = en ? val | mask : val & ~mask;
        writel(val, geth->base + GLOBAL_INTERRUPT_ENABLE_0_REG);
+       spin_unlock_irqrestore(&geth->irq_lock, flags);
  }
  
  static void gmac_tx_irq(struct net_device *netdev, unsigned int txq_num)
@@@ -1415,15 -1420,19 +1420,19 @@@ static unsigned int gmac_rx(struct net_
        union gmac_rxdesc_3 word3;
        struct page *page = NULL;
        unsigned int page_offs;
+       unsigned long flags;
        unsigned short r, w;
        union dma_rwptr rw;
        dma_addr_t mapping;
        int frag_nr = 0;
  
+       spin_lock_irqsave(&geth->irq_lock, flags);
        rw.bits32 = readl(ptr_reg);
        /* Reset interrupt as all packages until here are taken into account */
        writel(DEFAULT_Q0_INT_BIT << netdev->dev_id,
               geth->base + GLOBAL_INTERRUPT_STATUS_1_REG);
+       spin_unlock_irqrestore(&geth->irq_lock, flags);
        r = rw.bits.rptr;
        w = rw.bits.wptr;
  
@@@ -1726,10 -1735,9 +1735,9 @@@ static irqreturn_t gmac_irq(int irq, vo
                gmac_update_hw_stats(netdev);
  
        if (val & (GMAC0_RX_OVERRUN_INT_BIT << (netdev->dev_id * 8))) {
+               spin_lock(&geth->irq_lock);
                writel(GMAC0_RXDERR_INT_BIT << (netdev->dev_id * 8),
                       geth->base + GLOBAL_INTERRUPT_STATUS_4_REG);
-               spin_lock(&geth->irq_lock);
                u64_stats_update_begin(&port->ir_stats_syncp);
                ++port->stats.rx_fifo_errors;
                u64_stats_update_end(&port->ir_stats_syncp);
@@@ -1978,7 -1986,7 +1986,7 @@@ static int gmac_change_mtu(struct net_d
  
        gmac_disable_tx_rx(netdev);
  
 -      netdev->mtu = new_mtu;
 +      WRITE_ONCE(netdev->mtu, new_mtu);
        gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT,
                                CONFIG0_MAXLEN_MASK);
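
The gemini hunks above close a race in gmac_tx_irq_enable() and gmac_rx(): the interrupt enable register is updated with a read-modify-write, so two contexts touching different mask bits can overwrite each other unless the sequence is serialized by geth->irq_lock. Below is a minimal userspace sketch of that pattern, with pthreads standing in for spin_lock_irqsave() and a plain variable for the register; it is illustrative only, not the driver code.

/*
 * Illustrative userspace model, not the gemini driver: two threads doing a
 * read-modify-write on a shared 32-bit "interrupt enable" word.  Without the
 * mutex the threads can lose each other's updates, which is the race the
 * irq_lock above closes around the readl()/writel() sequence.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t irq_enable;			/* stands in for the MMIO register */
static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;

static void set_mask_bit(uint32_t mask, int en)
{
	uint32_t val;

	pthread_mutex_lock(&irq_lock);		/* spin_lock_irqsave() in the driver */
	val = irq_enable;			/* readl() */
	val = en ? (val | mask) : (val & ~mask);
	irq_enable = val;			/* writel() */
	pthread_mutex_unlock(&irq_lock);	/* spin_unlock_irqrestore() */
}

static void *worker(void *arg)
{
	uint32_t mask = (uint32_t)(uintptr_t)arg;

	for (int i = 0; i < 100000; i++) {
		set_mask_bit(mask, 1);
		set_mask_bit(mask, 0);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, (void *)(uintptr_t)0x1);
	pthread_create(&b, NULL, worker, (void *)(uintptr_t)0x2);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("final enable word: 0x%08x\n", irq_enable);
	return 0;
}

Removing the lock/unlock pair around the read-modify-write makes the final enable word depend on thread interleaving, which is exactly the lost-update the driver fix prevents.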
  
index 87d86d8897ad9615018134688b0d71a79524df1c,4df561d64bc38275945861bce10a4fa17846e496..ce5034ed2b240ce7c35bb8d78ffcd85be8c592f3
@@@ -4,7 -4,6 +4,7 @@@
  #include "ice_common.h"
  #include "ice.h"
  #include "ice_ddp.h"
 +#include "ice_sched.h"
  
  /* For supporting double VLAN mode, it is necessary to enable or disable certain
   * boost tcam entries. The metadata labels names that match the following
@@@ -722,12 -721,6 +722,12 @@@ static bool ice_is_gtp_c_profile(u16 pr
        }
  }
  
 +static bool ice_is_pfcp_profile(u16 prof_idx)
 +{
 +      return prof_idx >= ICE_PROFID_IPV4_PFCP_NODE &&
 +             prof_idx <= ICE_PROFID_IPV6_PFCP_SESSION;
 +}
 +
  /**
   * ice_get_sw_prof_type - determine switch profile type
   * @hw: pointer to the HW structure
@@@ -745,9 -738,6 +745,9 @@@ static enum ice_prof_type ice_get_sw_pr
        if (ice_is_gtp_u_profile(prof_idx))
                return ICE_PROF_TUN_GTPU;
  
 +      if (ice_is_pfcp_profile(prof_idx))
 +              return ICE_PROF_TUN_PFCP;
 +
        for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
                /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
                if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
@@@ -1434,14 -1424,14 +1434,14 @@@ ice_dwnld_sign_and_cfg_segs(struct ice_
                goto exit;
        }
  
-       conf_idx = le32_to_cpu(seg->signed_seg_idx);
-       start = le32_to_cpu(seg->signed_buf_start);
        count = le32_to_cpu(seg->signed_buf_count);
        state = ice_download_pkg_sig_seg(hw, seg);
-       if (state)
+       if (state || !count)
                goto exit;
  
+       conf_idx = le32_to_cpu(seg->signed_seg_idx);
+       start = le32_to_cpu(seg->signed_buf_start);
        state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
                                            count);
  
@@@ -2273,211 -2263,3 +2273,211 @@@ enum ice_ddp_state ice_copy_and_init_pk
  
        return state;
  }
 +
 +/**
 + * ice_get_set_tx_topo - get or set Tx topology
 + * @hw: pointer to the HW struct
 + * @buf: pointer to Tx topology buffer
 + * @buf_size: buffer size
 + * @cd: pointer to command details structure or NULL
 + * @flags: pointer to descriptor flags
 + * @set: 0-get, 1-set topology
 + *
 + * The function will get or set Tx topology
 + *
 + * Return: zero when set was successful, negative values otherwise.
 + */
 +static int
 +ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
 +                  struct ice_sq_cd *cd, u8 *flags, bool set)
 +{
 +      struct ice_aqc_get_set_tx_topo *cmd;
 +      struct ice_aq_desc desc;
 +      int status;
 +
 +      cmd = &desc.params.get_set_tx_topo;
 +      if (set) {
 +              ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_tx_topo);
 +              cmd->set_flags = ICE_AQC_TX_TOPO_FLAGS_ISSUED;
 +              /* requested to update a new topology, not a default topology */
 +              if (buf)
 +                      cmd->set_flags |= ICE_AQC_TX_TOPO_FLAGS_SRC_RAM |
 +                                        ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW;
 +
 +              if (ice_is_e825c(hw))
 +                      desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 +      } else {
 +              ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_tx_topo);
 +              cmd->get_flags = ICE_AQC_TX_TOPO_GET_RAM;
 +      }
 +
 +      if (!ice_is_e825c(hw))
 +              desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 +
 +      status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
 +      if (status)
 +              return status;
 +      /* read the return flag values (first byte) for get operation */
 +      if (!set && flags)
 +              *flags = desc.params.get_set_tx_topo.set_flags;
 +
 +      return 0;
 +}
 +
 +/**
 + * ice_cfg_tx_topo - Initialize new Tx topology if available
 + * @hw: pointer to the HW struct
 + * @buf: pointer to Tx topology buffer
 + * @len: buffer size
 + *
 + * The function will apply the new Tx topology from the package buffer
 + * if available.
 + *
 + * Return: zero when update was successful, negative values otherwise.
 + */
 +int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
 +{
 +      u8 *current_topo, *new_topo = NULL;
 +      struct ice_run_time_cfg_seg *seg;
 +      struct ice_buf_hdr *section;
 +      struct ice_pkg_hdr *pkg_hdr;
 +      enum ice_ddp_state state;
 +      u16 offset, size = 0;
 +      u32 reg = 0;
 +      int status;
 +      u8 flags;
 +
 +      if (!buf || !len)
 +              return -EINVAL;
 +
 +      /* Does FW support new Tx topology mode ? */
 +      if (!hw->func_caps.common_cap.tx_sched_topo_comp_mode_en) {
 +              ice_debug(hw, ICE_DBG_INIT, "FW doesn't support compatibility mode\n");
 +              return -EOPNOTSUPP;
 +      }
 +
 +      current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
 +      if (!current_topo)
 +              return -ENOMEM;
 +
 +      /* Get the current Tx topology */
 +      status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
 +                                   &flags, false);
 +
 +      kfree(current_topo);
 +
 +      if (status) {
 +              ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
 +              return status;
 +      }
 +
 +      /* Is default topology already applied ? */
 +      if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
 +          hw->num_tx_sched_layers == ICE_SCHED_9_LAYERS) {
 +              ice_debug(hw, ICE_DBG_INIT, "Default topology already applied\n");
 +              return -EEXIST;
 +      }
 +
 +      /* Is new topology already applied ? */
 +      if ((flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
 +          hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
 +              ice_debug(hw, ICE_DBG_INIT, "New topology already applied\n");
 +              return -EEXIST;
 +      }
 +
 +      /* Setting topology already issued? */
 +      if (flags & ICE_AQC_TX_TOPO_FLAGS_ISSUED) {
 +              ice_debug(hw, ICE_DBG_INIT, "Update Tx topology was done by another PF\n");
 +              /* Add a small delay before exiting */
 +              msleep(2000);
 +              return -EEXIST;
 +      }
 +
 +      /* Change the topology from new to default (5 to 9) */
 +      if (!(flags & ICE_AQC_TX_TOPO_FLAGS_LOAD_NEW) &&
 +          hw->num_tx_sched_layers == ICE_SCHED_5_LAYERS) {
 +              ice_debug(hw, ICE_DBG_INIT, "Change topology from 5 to 9 layers\n");
 +              goto update_topo;
 +      }
 +
 +      pkg_hdr = (struct ice_pkg_hdr *)buf;
 +      state = ice_verify_pkg(pkg_hdr, len);
 +      if (state) {
 +              ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
 +                        state);
 +              return -EIO;
 +      }
 +
 +      /* Find runtime configuration segment */
 +      seg = (struct ice_run_time_cfg_seg *)
 +            ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
 +      if (!seg) {
 +              ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
 +              return -EIO;
 +      }
 +
 +      if (le32_to_cpu(seg->buf_table.buf_count) < ICE_MIN_S_COUNT) {
 +              ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment count(%d) is wrong\n",
 +                        seg->buf_table.buf_count);
 +              return -EIO;
 +      }
 +
 +      section = ice_pkg_val_buf(seg->buf_table.buf_array);
 +      if (!section || le32_to_cpu(section->section_entry[0].type) !=
 +              ICE_SID_TX_5_LAYER_TOPO) {
 +              ice_debug(hw, ICE_DBG_INIT, "5 layer topology section type is wrong\n");
 +              return -EIO;
 +      }
 +
 +      size = le16_to_cpu(section->section_entry[0].size);
 +      offset = le16_to_cpu(section->section_entry[0].offset);
 +      if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ) {
 +              ice_debug(hw, ICE_DBG_INIT, "5 layer topology section size is wrong\n");
 +              return -EIO;
 +      }
 +
 +      /* Make sure the section fits in the buffer */
 +      if (offset + size > ICE_PKG_BUF_SIZE) {
 +              ice_debug(hw, ICE_DBG_INIT, "5 layer topology buffer > 4K\n");
 +              return -EIO;
 +      }
 +
 +      /* Get the new topology buffer */
 +      new_topo = ((u8 *)section) + offset;
 +
 +update_topo:
 +      /* Acquire global lock to make sure that set topology issued
 +       * by one PF.
 +       */
 +      status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
 +                               ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 +      if (status) {
 +              ice_debug(hw, ICE_DBG_INIT, "Failed to acquire global lock\n");
 +              return status;
 +      }
 +
 +      /* Check if reset was triggered already. */
 +      reg = rd32(hw, GLGEN_RSTAT);
 +      if (reg & GLGEN_RSTAT_DEVSTATE_M) {
 +              /* Reset is in progress, re-init the HW again */
 +              ice_debug(hw, ICE_DBG_INIT, "Reset is in progress. Layer topology might be applied already\n");
 +              ice_check_reset(hw);
 +              return 0;
 +      }
 +
 +      /* Set new topology */
 +      status = ice_get_set_tx_topo(hw, new_topo, size, NULL, NULL, true);
 +      if (status) {
 +              ice_debug(hw, ICE_DBG_INIT, "Failed setting Tx topology\n");
 +              return status;
 +      }
 +
 +      /* New topology is updated, delay 1 second before issuing the CORER */
 +      msleep(1000);
 +      ice_reset(hw, ICE_RESET_CORER);
 +      /* CORER will clear the global lock, so no explicit call
 +       * required for release.
 +       */
 +
 +      return 0;
 +}
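
The new ice_cfg_tx_topo() above only takes a pointer into the package buffer after checking the section size against ICE_MIN_S_SZ/ICE_MAX_S_SZ and making sure offset + size stays within ICE_PKG_BUF_SIZE. The following stand-alone sketch shows that validate-before-dereference pattern; the type, constant, and values are made-up stand-ins, not the real ice definitions.

/*
 * Illustrative only: validate an embedded section before pointing into the
 * package buffer.  The struct, constant and sizes below are stand-ins, not
 * the real ice definitions.
 */
#include <stddef.h>
#include <stdint.h>

#define PKG_BUF_SIZE 4096u			/* stand-in for ICE_PKG_BUF_SIZE */

struct section_entry {				/* stand-in layout */
	uint16_t offset;
	uint16_t size;
};

/* Return a pointer into buf, or NULL if the entry does not fit. */
static const uint8_t *section_data(const uint8_t *buf, size_t buf_len,
				   const struct section_entry *e)
{
	/* reject empty or oversized sections before using the offset */
	if (e->size == 0 || e->size > buf_len)
		return NULL;
	/* offset + size must stay inside the buffer */
	if ((size_t)e->offset + e->size > buf_len)
		return NULL;
	return buf + e->offset;
}

int main(void)
{
	static uint8_t pkg[PKG_BUF_SIZE];
	struct section_entry e = { .offset = 128, .size = 64 };

	return section_data(pkg, sizeof(pkg), &e) ? 0 : 1;
}

Widening the offset to size_t before the addition keeps the bounds check free of 16-bit overflow.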
index 179c0230655a8e77a0950b6baed41994e914ec5a,d7d73295f0dc4b4b2b213db9f83069c91dc3601b..cae46290a7aee28a2d4feb4ab65ec5340dc91bf8
@@@ -110,16 -110,16 +110,16 @@@ static const struct mtk_reg_map mt7986_
        .tx_irq_mask            = 0x461c,
        .tx_irq_status          = 0x4618,
        .pdma = {
-               .rx_ptr         = 0x6100,
-               .rx_cnt_cfg     = 0x6104,
-               .pcrx_ptr       = 0x6108,
-               .glo_cfg        = 0x6204,
-               .rst_idx        = 0x6208,
-               .delay_irq      = 0x620c,
-               .irq_status     = 0x6220,
-               .irq_mask       = 0x6228,
-               .adma_rx_dbg0   = 0x6238,
-               .int_grp        = 0x6250,
+               .rx_ptr         = 0x4100,
+               .rx_cnt_cfg     = 0x4104,
+               .pcrx_ptr       = 0x4108,
+               .glo_cfg        = 0x4204,
+               .rst_idx        = 0x4208,
+               .delay_irq      = 0x420c,
+               .irq_status     = 0x4220,
+               .irq_mask       = 0x4228,
+               .adma_rx_dbg0   = 0x4238,
+               .int_grp        = 0x4250,
        },
        .qdma = {
                .qtx_cfg        = 0x4400,
@@@ -1107,7 -1107,7 +1107,7 @@@ static bool mtk_rx_get_desc(struct mtk_
        rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
        rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
        rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
-       if (mtk_is_netsys_v2_or_greater(eth)) {
+       if (mtk_is_netsys_v3_or_greater(eth)) {
                rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
                rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
        }
@@@ -1139,7 -1139,7 +1139,7 @@@ static int mtk_init_fq_dma(struct mtk_e
                eth->scratch_ring = eth->sram_base;
        else
                eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
-                                                      cnt * soc->txrx.txd_size,
+                                                      cnt * soc->tx.desc_size,
                                                       &eth->phy_scratch_ring,
                                                       GFP_KERNEL);
        if (unlikely(!eth->scratch_ring))
        if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
                return -ENOMEM;
  
-       phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+       phy_ring_tail = eth->phy_scratch_ring + soc->tx.desc_size * (cnt - 1);
  
        for (i = 0; i < cnt; i++) {
                dma_addr_t addr = dma_addr + i * MTK_QDMA_PAGE_SIZE;
                struct mtk_tx_dma_v2 *txd;
  
-               txd = eth->scratch_ring + i * soc->txrx.txd_size;
+               txd = eth->scratch_ring + i * soc->tx.desc_size;
                txd->txd1 = addr;
                if (i < cnt - 1)
                        txd->txd2 = eth->phy_scratch_ring +
-                                   (i + 1) * soc->txrx.txd_size;
+                                   (i + 1) * soc->tx.desc_size;
  
                txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
                if (MTK_HAS_CAPS(soc->caps, MTK_36BIT_DMA))
@@@ -1416,7 -1416,7 +1416,7 @@@ static int mtk_tx_map(struct sk_buff *s
        if (itxd == ring->last_free)
                return -ENOMEM;
  
-       itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+       itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
        memset(itx_buf, 0, sizeof(*itx_buf));
  
        txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
  
                        memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
                        txd_info.size = min_t(unsigned int, frag_size,
-                                             soc->txrx.dma_max_len);
+                                             soc->tx.dma_max_len);
                        txd_info.qid = queue;
                        txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
                                        !(frag_size - txd_info.size);
                        mtk_tx_set_dma_desc(dev, txd, &txd_info);
  
                        tx_buf = mtk_desc_to_tx_buf(ring, txd,
-                                                   soc->txrx.txd_size);
+                                                   soc->tx.desc_size);
                        if (new_desc)
                                memset(tx_buf, 0, sizeof(*tx_buf));
                        tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
        } else {
                int next_idx;
  
-               next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+               next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->tx.desc_size),
                                         ring->dma_size);
                mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
        }
  
  err_dma:
        do {
-               tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+               tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->tx.desc_size);
  
                /* unmap dma */
                mtk_tx_unmap(eth, tx_buf, NULL, false);
@@@ -1547,7 -1547,7 +1547,7 @@@ static int mtk_cal_txd_req(struct mtk_e
                for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                        frag = &skb_shinfo(skb)->frags[i];
                        nfrags += DIV_ROUND_UP(skb_frag_size(frag),
-                                              eth->soc->txrx.dma_max_len);
+                                              eth->soc->tx.dma_max_len);
                }
        } else {
                nfrags += skb_shinfo(skb)->nr_frags;
@@@ -1654,7 -1654,7 +1654,7 @@@ static struct mtk_rx_ring *mtk_get_rx_r
  
                ring = &eth->rx_ring[i];
                idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
-               rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+               rxd = ring->dma + idx * eth->soc->rx.desc_size;
                if (rxd->rxd2 & RX_DMA_DONE) {
                        ring->calc_idx_update = true;
                        return ring;
@@@ -1710,7 -1710,7 +1710,7 @@@ static struct page_pool *mtk_create_pag
        if (IS_ERR(pp))
                return pp;
  
 -      err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
 +      err = __xdp_rxq_info_reg(xdp_q, eth->dummy_dev, id,
                                 eth->rx_napi.napi_id, PAGE_SIZE);
        if (err < 0)
                goto err_free_pp;
@@@ -1822,7 -1822,7 +1822,7 @@@ static int mtk_xdp_submit_frame(struct 
        }
        htxd = txd;
  
-       tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+       tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->tx.desc_size);
        memset(tx_buf, 0, sizeof(*tx_buf));
        htx_buf = tx_buf;
  
                                goto unmap;
  
                        tx_buf = mtk_desc_to_tx_buf(ring, txd,
-                                                   soc->txrx.txd_size);
+                                                   soc->tx.desc_size);
                        memset(tx_buf, 0, sizeof(*tx_buf));
                        n_desc++;
                }
        } else {
                int idx;
  
-               idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+               idx = txd_to_idx(ring, txd, soc->tx.desc_size);
                mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
                        MT7628_TX_CTX_IDX0);
        }
  
  unmap:
        while (htxd != txd) {
-               tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+               tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->tx.desc_size);
                mtk_tx_unmap(eth, tx_buf, NULL, false);
  
                htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
@@@ -2021,14 -2021,14 +2021,14 @@@ static int mtk_poll_rx(struct napi_stru
                        goto rx_done;
  
                idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
-               rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+               rxd = ring->dma + idx * eth->soc->rx.desc_size;
                data = ring->data[idx];
  
                if (!mtk_rx_get_desc(eth, &trxd, rxd))
                        break;
  
                /* find out which mac the packet come from. values start at 1 */
-               if (mtk_is_netsys_v2_or_greater(eth)) {
+               if (mtk_is_netsys_v3_or_greater(eth)) {
                        u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
  
                        switch (val) {
                skb->dev = netdev;
                bytes += skb->len;
  
-               if (mtk_is_netsys_v2_or_greater(eth)) {
+               if (mtk_is_netsys_v3_or_greater(eth)) {
                        reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
                        hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
                        if (hash != MTK_RXD5_FOE_ENTRY)
                        rxdcsum = &trxd.rxd4;
                }
  
-               if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+               if (*rxdcsum & eth->soc->rx.dma_l4_valid)
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                else
                        skb_checksum_none_assert(skb);
@@@ -2280,7 -2280,7 +2280,7 @@@ static int mtk_poll_tx_qdma(struct mtk_
                        break;
  
                tx_buf = mtk_desc_to_tx_buf(ring, desc,
-                                           eth->soc->txrx.txd_size);
+                                           eth->soc->tx.desc_size);
                if (!tx_buf->data)
                        break;
  
@@@ -2331,7 -2331,7 +2331,7 @@@ static int mtk_poll_tx_pdma(struct mtk_
                }
                mtk_tx_unmap(eth, tx_buf, &bq, true);
  
-               desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+               desc = ring->dma + cpu * eth->soc->tx.desc_size;
                ring->last_free = desc;
                atomic_inc(&ring->free_count);
  
@@@ -2421,7 -2421,7 +2421,7 @@@ static int mtk_napi_rx(struct napi_stru
        do {
                int rx_done;
  
-               mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+               mtk_w32(eth, eth->soc->rx.irq_done_mask,
                        reg_map->pdma.irq_status);
                rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
                rx_done_total += rx_done;
                        return budget;
  
        } while (mtk_r32(eth, reg_map->pdma.irq_status) &
-                eth->soc->txrx.rx_irq_done_mask);
+                eth->soc->rx.irq_done_mask);
  
        if (napi_complete_done(napi, rx_done_total))
-               mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+               mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
  
        return rx_done_total;
  }
@@@ -2449,7 -2449,7 +2449,7 @@@ static int mtk_tx_alloc(struct mtk_eth 
  {
        const struct mtk_soc_data *soc = eth->soc;
        struct mtk_tx_ring *ring = &eth->tx_ring;
-       int i, sz = soc->txrx.txd_size;
+       int i, sz = soc->tx.desc_size;
        struct mtk_tx_dma_v2 *txd;
        int ring_size;
        u32 ofs, val;
@@@ -2572,14 -2572,14 +2572,14 @@@ static void mtk_tx_clean(struct mtk_et
        }
        if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
                dma_free_coherent(eth->dma_dev,
-                                 ring->dma_size * soc->txrx.txd_size,
+                                 ring->dma_size * soc->tx.desc_size,
                                  ring->dma, ring->phys);
                ring->dma = NULL;
        }
  
        if (ring->dma_pdma) {
                dma_free_coherent(eth->dma_dev,
-                                 ring->dma_size * soc->txrx.txd_size,
+                                 ring->dma_size * soc->tx.desc_size,
                                  ring->dma_pdma, ring->phys_pdma);
                ring->dma_pdma = NULL;
        }
@@@ -2634,15 -2634,15 +2634,15 @@@ static int mtk_rx_alloc(struct mtk_eth 
        if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
            rx_flag != MTK_RX_FLAGS_NORMAL) {
                ring->dma = dma_alloc_coherent(eth->dma_dev,
-                                              rx_dma_size * eth->soc->txrx.rxd_size,
-                                              &ring->phys, GFP_KERNEL);
+                               rx_dma_size * eth->soc->rx.desc_size,
+                               &ring->phys, GFP_KERNEL);
        } else {
                struct mtk_tx_ring *tx_ring = &eth->tx_ring;
  
                ring->dma = tx_ring->dma + tx_ring_size *
-                           eth->soc->txrx.txd_size * (ring_no + 1);
+                           eth->soc->tx.desc_size * (ring_no + 1);
                ring->phys = tx_ring->phys + tx_ring_size *
-                            eth->soc->txrx.txd_size * (ring_no + 1);
+                            eth->soc->tx.desc_size * (ring_no + 1);
        }
  
        if (!ring->dma)
                dma_addr_t dma_addr;
                void *data;
  
-               rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+               rxd = ring->dma + i * eth->soc->rx.desc_size;
                if (ring->page_pool) {
                        data = mtk_page_pool_get_buff(ring->page_pool,
                                                      &dma_addr, GFP_KERNEL);
  
                rxd->rxd3 = 0;
                rxd->rxd4 = 0;
-               if (mtk_is_netsys_v2_or_greater(eth)) {
+               if (mtk_is_netsys_v3_or_greater(eth)) {
                        rxd->rxd5 = 0;
                        rxd->rxd6 = 0;
                        rxd->rxd7 = 0;
@@@ -2744,7 -2744,7 +2744,7 @@@ static void mtk_rx_clean(struct mtk_et
                        if (!ring->data[i])
                                continue;
  
-                       rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+                       rxd = ring->dma + i * eth->soc->rx.desc_size;
                        if (!rxd->rxd1)
                                continue;
  
  
        if (!in_sram && ring->dma) {
                dma_free_coherent(eth->dma_dev,
-                                 ring->dma_size * eth->soc->txrx.rxd_size,
+                                 ring->dma_size * eth->soc->rx.desc_size,
                                  ring->dma, ring->phys);
                ring->dma = NULL;
        }
@@@ -3124,7 -3124,7 +3124,7 @@@ static void mtk_dma_free(struct mtk_et
                        netdev_reset_queue(eth->netdev[i]);
        if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
                dma_free_coherent(eth->dma_dev,
-                                 MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+                                 MTK_QDMA_RING_SIZE * soc->tx.desc_size,
                                  eth->scratch_ring, eth->phy_scratch_ring);
                eth->scratch_ring = NULL;
                eth->phy_scratch_ring = 0;
@@@ -3174,7 -3174,7 +3174,7 @@@ static irqreturn_t mtk_handle_irq_rx(in
  
        eth->rx_events++;
        if (likely(napi_schedule_prep(&eth->rx_napi))) {
-               mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+               mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
                __napi_schedule(&eth->rx_napi);
        }
  
@@@ -3200,9 -3200,9 +3200,9 @@@ static irqreturn_t mtk_handle_irq(int i
        const struct mtk_reg_map *reg_map = eth->soc->reg_map;
  
        if (mtk_r32(eth, reg_map->pdma.irq_mask) &
-           eth->soc->txrx.rx_irq_done_mask) {
+           eth->soc->rx.irq_done_mask) {
                if (mtk_r32(eth, reg_map->pdma.irq_status) &
-                   eth->soc->txrx.rx_irq_done_mask)
+                   eth->soc->rx.irq_done_mask)
                        mtk_handle_irq_rx(irq, _eth);
        }
        if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
@@@ -3220,10 -3220,10 +3220,10 @@@ static void mtk_poll_controller(struct 
        struct mtk_eth *eth = mac->hw;
  
        mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
-       mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+       mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
        mtk_handle_irq_rx(eth->irq[2], dev);
        mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
-       mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+       mtk_rx_irq_enable(eth, eth->soc->rx.irq_done_mask);
  }
  #endif
  
@@@ -3387,7 -3387,7 +3387,7 @@@ static int mtk_open(struct net_device *
                napi_enable(&eth->tx_napi);
                napi_enable(&eth->rx_napi);
                mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
-               mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+               mtk_rx_irq_enable(eth, soc->rx.irq_done_mask);
                refcount_set(&eth->dma_refcnt, 1);
        }
        else
@@@ -3471,7 -3471,7 +3471,7 @@@ static int mtk_stop(struct net_device *
        mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
  
        mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
-       mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+       mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask);
        napi_disable(&eth->tx_napi);
        napi_disable(&eth->rx_napi);
  
@@@ -3893,7 -3893,7 +3893,7 @@@ static int mtk_hw_init(struct mtk_eth *
        else
                mtk_hw_reset(eth);
  
-       if (mtk_is_netsys_v2_or_greater(eth)) {
+       if (mtk_is_netsys_v3_or_greater(eth)) {
                /* Set FE to PDMAv2 if necessary */
                val = mtk_r32(eth, MTK_FE_GLO_MISC);
                mtk_w32(eth,  val | BIT(4), MTK_FE_GLO_MISC);
  
        /* FE int grouping */
        mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
-       mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+       mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->pdma.int_grp + 4);
        mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
-       mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+       mtk_w32(eth, eth->soc->rx.irq_done_mask, reg_map->qdma.int_grp + 4);
        mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
  
        if (mtk_is_netsys_v3_or_greater(eth)) {
@@@ -4055,7 -4055,7 +4055,7 @@@ static int mtk_change_mtu(struct net_de
        }
  
        mtk_set_mcr_max_rx(mac, length);
 -      dev->mtu = new_mtu;
 +      WRITE_ONCE(dev->mtu, new_mtu);
  
        return 0;
  }
@@@ -4188,8 -4188,6 +4188,8 @@@ static int mtk_free_dev(struct mtk_eth 
                metadata_dst_free(eth->dsa_meta[i]);
        }
  
 +      free_netdev(eth->dummy_dev);
 +
        return 0;
  }
  
@@@ -4985,14 -4983,9 +4985,14 @@@ static int mtk_probe(struct platform_de
        /* we run 2 devices on the same DMA ring so we need a dummy device
         * for NAPI to work
         */
 -      init_dummy_netdev(&eth->dummy_dev);
 -      netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
 -      netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
 +      eth->dummy_dev = alloc_netdev_dummy(0);
 +      if (!eth->dummy_dev) {
 +              err = -ENOMEM;
 +              dev_err(eth->dev, "failed to allocated dummy device\n");
 +              goto err_unreg_netdev;
 +      }
 +      netif_napi_add(eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
 +      netif_napi_add(eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
  
        platform_set_drvdata(pdev, eth);
        schedule_delayed_work(&eth->reset.monitor_work,
  
        return 0;
  
 +err_unreg_netdev:
 +      mtk_unreg_dev(eth);
  err_deinit_ppe:
        mtk_ppe_deinit(eth);
        mtk_mdio_cleanup(eth);
@@@ -5048,11 -5039,15 +5048,15 @@@ static const struct mtk_soc_data mt2701
        .required_clks = MT7623_CLKS_BITMAP,
        .required_pctl = true,
        .version = 1,
-       .txrx = {
-               .txd_size = sizeof(struct mtk_tx_dma),
-               .rxd_size = sizeof(struct mtk_rx_dma),
-               .rx_irq_done_mask = MTK_RX_DONE_INT,
-               .rx_dma_l4_valid = RX_DMA_L4_VALID,
+       .tx = {
+               .desc_size = sizeof(struct mtk_tx_dma),
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+       .rx = {
+               .desc_size = sizeof(struct mtk_rx_dma),
+               .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@@ -5068,11 -5063,15 +5072,15 @@@ static const struct mtk_soc_data mt7621
        .offload_version = 1,
        .hash_offset = 2,
        .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
-       .txrx = {
-               .txd_size = sizeof(struct mtk_tx_dma),
-               .rxd_size = sizeof(struct mtk_rx_dma),
-               .rx_irq_done_mask = MTK_RX_DONE_INT,
-               .rx_dma_l4_valid = RX_DMA_L4_VALID,
+       .tx = {
+               .desc_size = sizeof(struct mtk_tx_dma),
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+       .rx = {
+               .desc_size = sizeof(struct mtk_rx_dma),
+               .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@@ -5090,11 -5089,15 +5098,15 @@@ static const struct mtk_soc_data mt7622
        .hash_offset = 2,
        .has_accounting = true,
        .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
-       .txrx = {
-               .txd_size = sizeof(struct mtk_tx_dma),
-               .rxd_size = sizeof(struct mtk_rx_dma),
-               .rx_irq_done_mask = MTK_RX_DONE_INT,
-               .rx_dma_l4_valid = RX_DMA_L4_VALID,
+       .tx = {
+               .desc_size = sizeof(struct mtk_tx_dma),
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+       .rx = {
+               .desc_size = sizeof(struct mtk_rx_dma),
+               .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@@ -5111,11 -5114,15 +5123,15 @@@ static const struct mtk_soc_data mt7623
        .hash_offset = 2,
        .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
        .disable_pll_modes = true,
-       .txrx = {
-               .txd_size = sizeof(struct mtk_tx_dma),
-               .rxd_size = sizeof(struct mtk_rx_dma),
-               .rx_irq_done_mask = MTK_RX_DONE_INT,
-               .rx_dma_l4_valid = RX_DMA_L4_VALID,
+       .tx = {
+               .desc_size = sizeof(struct mtk_tx_dma),
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+       .rx = {
+               .desc_size = sizeof(struct mtk_rx_dma),
+               .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@@ -5130,11 -5137,15 +5146,15 @@@ static const struct mtk_soc_data mt7629
        .required_pctl = false,
        .has_accounting = true,
        .version = 1,
-       .txrx = {
-               .txd_size = sizeof(struct mtk_tx_dma),
-               .rxd_size = sizeof(struct mtk_rx_dma),
-               .rx_irq_done_mask = MTK_RX_DONE_INT,
-               .rx_dma_l4_valid = RX_DMA_L4_VALID,
+       .tx = {
+               .desc_size = sizeof(struct mtk_tx_dma),
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+       .rx = {
+               .desc_size = sizeof(struct mtk_rx_dma),
+               .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
@@@ -5152,14 -5163,18 +5172,18 @@@ static const struct mtk_soc_data mt7981
        .hash_offset = 4,
        .has_accounting = true,
        .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
-       .txrx = {
-               .txd_size = sizeof(struct mtk_tx_dma_v2),
-               .rxd_size = sizeof(struct mtk_rx_dma_v2),
-               .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
-               .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+       .tx = {
+               .desc_size = sizeof(struct mtk_tx_dma_v2),
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
        },
+       .rx = {
+               .desc_size = sizeof(struct mtk_rx_dma),
+               .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID_V2,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
  };
  
  static const struct mtk_soc_data mt7986_data = {
        .hash_offset = 4,
        .has_accounting = true,
        .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
-       .txrx = {
-               .txd_size = sizeof(struct mtk_tx_dma_v2),
-               .rxd_size = sizeof(struct mtk_rx_dma_v2),
-               .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
-               .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+       .tx = {
+               .desc_size = sizeof(struct mtk_tx_dma_v2),
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
        },
+       .rx = {
+               .desc_size = sizeof(struct mtk_rx_dma),
+               .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID_V2,
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
  };
  
  static const struct mtk_soc_data mt7988_data = {
        .hash_offset = 4,
        .has_accounting = true,
        .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
-       .txrx = {
-               .txd_size = sizeof(struct mtk_tx_dma_v2),
-               .rxd_size = sizeof(struct mtk_rx_dma_v2),
-               .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
-               .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+       .tx = {
+               .desc_size = sizeof(struct mtk_tx_dma_v2),
+               .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+               .dma_len_offset = 8,
+       },
+       .rx = {
+               .desc_size = sizeof(struct mtk_rx_dma_v2),
+               .irq_done_mask = MTK_RX_DONE_INT_V2,
+               .dma_l4_valid = RX_DMA_L4_VALID_V2,
                .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
                .dma_len_offset = 8,
        },
@@@ -5213,11 -5236,15 +5245,15 @@@ static const struct mtk_soc_data rt5350
        .required_clks = MT7628_CLKS_BITMAP,
        .required_pctl = false,
        .version = 1,
-       .txrx = {
-               .txd_size = sizeof(struct mtk_tx_dma),
-               .rxd_size = sizeof(struct mtk_rx_dma),
-               .rx_irq_done_mask = MTK_RX_DONE_INT,
-               .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+       .tx = {
+               .desc_size = sizeof(struct mtk_tx_dma),
+               .dma_max_len = MTK_TX_DMA_BUF_LEN,
+               .dma_len_offset = 16,
+       },
+       .rx = {
+               .desc_size = sizeof(struct mtk_rx_dma),
+               .irq_done_mask = MTK_RX_DONE_INT,
+               .dma_l4_valid = RX_DMA_L4_VALID_PDMA,
                .dma_max_len = MTK_TX_DMA_BUF_LEN,
                .dma_len_offset = 16,
        },
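
The mtk_eth_soc changes in this file (and in the header below) split the single shared txrx parameter block into separate tx and rx blocks, since newer NETSYS revisions no longer use the same descriptor size and length encoding in both directions. A small, self-contained sketch of that kind of refactor follows; the field values are invented, not the real MediaTek numbers.

/* Illustrative only: per-direction DMA parameter blocks; values are invented. */
#include <stdint.h>
#include <stdio.h>

struct dma_params {
	uint32_t desc_size;
	uint32_t dma_max_len;
	uint32_t dma_len_offset;
};

struct soc_data {
	struct dma_params tx;			/* previously one combined "txrx" block */
	struct dma_params rx;
};

static const struct soc_data example_soc = {
	.tx = { .desc_size = 32, .dma_max_len = 0xffff, .dma_len_offset = 8 },
	.rx = { .desc_size = 24, .dma_max_len = 0x3fff, .dma_len_offset = 16 },
};

int main(void)
{
	/* callers name the direction explicitly, e.g. soc->tx.desc_size */
	printf("tx desc %u, rx desc %u\n",
	       (unsigned int)example_soc.tx.desc_size,
	       (unsigned int)example_soc.rx.desc_size);
	return 0;
}

Callers then pick the direction explicitly (soc->tx.desc_size vs soc->rx.desc_size), which is what the mechanical replacements throughout the driver above do.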
index 723fc637027c42fc7f71c7e35548edc94075c370,39b50de1decbffecd2cead3662959cc57e9ae6ea..4eab30b44070633a2357f8517a6ad1e5c8340149
  /* QDMA descriptor txd3 */
  #define TX_DMA_OWNER_CPU      BIT(31)
  #define TX_DMA_LS0            BIT(30)
- #define TX_DMA_PLEN0(x)               (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
- #define TX_DMA_PLEN1(x)               ((x) & eth->soc->txrx.dma_max_len)
+ #define TX_DMA_PLEN0(x)               (((x) & eth->soc->tx.dma_max_len) << eth->soc->tx.dma_len_offset)
+ #define TX_DMA_PLEN1(x)               ((x) & eth->soc->tx.dma_max_len)
  #define TX_DMA_SWC            BIT(14)
  #define TX_DMA_PQID           GENMASK(3, 0)
  #define TX_DMA_ADDR64_MASK    GENMASK(3, 0)
  /* QDMA descriptor rxd2 */
  #define RX_DMA_DONE           BIT(31)
  #define RX_DMA_LSO            BIT(30)
- #define RX_DMA_PREP_PLEN0(x)  (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
- #define RX_DMA_GET_PLEN0(x)   (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+ #define RX_DMA_PREP_PLEN0(x)  (((x) & eth->soc->rx.dma_max_len) << eth->soc->rx.dma_len_offset)
+ #define RX_DMA_GET_PLEN0(x)   (((x) >> eth->soc->rx.dma_len_offset) & eth->soc->rx.dma_max_len)
  #define RX_DMA_VTAG           BIT(15)
  #define RX_DMA_ADDR64_MASK    GENMASK(3, 0)
  #if IS_ENABLED(CONFIG_64BIT)
@@@ -1153,10 -1153,9 +1153,9 @@@ struct mtk_reg_map 
   * @foe_entry_size            Foe table entry size.
   * @has_accounting            Bool indicating support for accounting of
   *                            offloaded flows.
-  * @txd_size                  Tx DMA descriptor size.
-  * @rxd_size                  Rx DMA descriptor size.
-  * @rx_irq_done_mask          Rx irq done register mask.
-  * @rx_dma_l4_valid           Rx DMA valid register mask.
+  * @desc_size                 Tx/Rx DMA descriptor size.
+  * @irq_done_mask             Rx irq done register mask.
+  * @dma_l4_valid              Rx DMA valid register mask.
   * @dma_max_len                       Max DMA tx/rx buffer length.
   * @dma_len_offset            Tx/Rx DMA length field offset.
   */
@@@ -1174,13 -1173,17 +1173,17 @@@ struct mtk_soc_data 
        bool            has_accounting;
        bool            disable_pll_modes;
        struct {
-               u32     txd_size;
-               u32     rxd_size;
-               u32     rx_irq_done_mask;
-               u32     rx_dma_l4_valid;
+               u32     desc_size;
                u32     dma_max_len;
                u32     dma_len_offset;
-       } txrx;
+       } tx;
+       struct {
+               u32     desc_size;
+               u32     irq_done_mask;
+               u32     dma_l4_valid;
+               u32     dma_max_len;
+               u32     dma_len_offset;
+       } rx;
  };
  
  #define MTK_DMA_MONITOR_TIMEOUT               msecs_to_jiffies(1000)
@@@ -1242,7 -1245,7 +1245,7 @@@ struct mtk_eth 
        spinlock_t                      page_lock;
        spinlock_t                      tx_irq_lock;
        spinlock_t                      rx_irq_lock;
 -      struct net_device               dummy_dev;
 +      struct net_device               *dummy_dev;
        struct net_device               *netdev[MTK_MAX_DEVS];
        struct mtk_mac                  *mac[MTK_MAX_DEVS];
        int                             irq[3];
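
Both the gemini and mtk_eth_soc hunks switch the MTU update to WRITE_ONCE(dev->mtu, new_mtu), pairing the store with lockless READ_ONCE() readers so the compiler cannot tear or fuse the access. As a rough userspace analogue, relaxed C11 atomics can stand in for the kernel's READ_ONCE()/WRITE_ONCE(); the sketch below is illustrative only and not the kernel helpers.

/*
 * Rough userspace analogue of WRITE_ONCE(dev->mtu, new_mtu): the writer
 * publishes a plain integer that lockless readers may load concurrently, so
 * both sides use single, non-torn accesses.  Relaxed C11 atomics stand in
 * for the kernel's READ_ONCE()/WRITE_ONCE().
 */
#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int mtu = 1500;

static void set_mtu(unsigned int new_mtu)
{
	atomic_store_explicit(&mtu, new_mtu, memory_order_relaxed);
}

static unsigned int get_mtu(void)
{
	return atomic_load_explicit(&mtu, memory_order_relaxed);
}

int main(void)
{
	set_mtu(9000);
	printf("mtu=%u\n", get_mtu());
	return 0;
}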
index 0a3d1999ede5d1fc719453b28e7e759f7db18670,64497b6eebd36e801cada2a35e71c127990bb22d..b758bc72ac36bb61b40da488659368548c825005
@@@ -30,7 -30,6 +30,7 @@@
   * SOFTWARE.
   */
  
 +#include <linux/dim.h>
  #include <net/tc_act/tc_gact.h>
  #include <linux/mlx5/fs.h>
  #include <net/vxlan.h>
@@@ -44,7 -43,6 +44,7 @@@
  #include <net/xdp_sock_drv.h>
  #include "eswitch.h"
  #include "en.h"
 +#include "en/dim.h"
  #include "en/txrx.h"
  #include "en_tc.h"
  #include "en_rep.h"
@@@ -962,6 -960,17 +962,6 @@@ static int mlx5e_alloc_rq(struct mlx5e_
                }
        }
  
 -      INIT_WORK(&rq->dim.work, mlx5e_rx_dim_work);
 -
 -      switch (params->rx_cq_moderation.cq_period_mode) {
 -      case MLX5_CQ_PERIOD_MODE_START_FROM_CQE:
 -              rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE;
 -              break;
 -      case MLX5_CQ_PERIOD_MODE_START_FROM_EQE:
 -      default:
 -              rq->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 -      }
 -
        return 0;
  
  err_destroy_page_pool:
@@@ -1011,7 -1020,6 +1011,7 @@@ static void mlx5e_free_rq(struct mlx5e_
                mlx5e_free_wqe_alloc_info(rq);
        }
  
 +      kvfree(rq->dim);
        xdp_rxq_info_unreg(&rq->xdp_rxq);
        page_pool_destroy(rq->page_pool);
        mlx5_wq_destroy(&rq->wq_ctrl);
@@@ -1292,21 -1300,8 +1292,21 @@@ int mlx5e_open_rq(struct mlx5e_params *
        if (MLX5_CAP_ETH(mdev, cqe_checksum_full))
                __set_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state);
  
 -      if (params->rx_dim_enabled)
 -              __set_bit(MLX5E_RQ_STATE_DIM, &rq->state);
 +      if (rq->channel && !params->rx_dim_enabled) {
 +              rq->channel->rx_cq_moder = params->rx_cq_moderation;
 +      } else if (rq->channel) {
 +              u8 cq_period_mode;
 +
 +              cq_period_mode = params->rx_moder_use_cqe_mode ?
 +                                       DIM_CQ_PERIOD_MODE_START_FROM_CQE :
 +                                       DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 +              mlx5e_reset_rx_moderation(&rq->channel->rx_cq_moder, cq_period_mode,
 +                                        params->rx_dim_enabled);
 +
 +              err = mlx5e_dim_rx_change(rq, params->rx_dim_enabled);
 +              if (err)
 +                      goto err_destroy_rq;
 +      }
  
        /* We disable csum_complete when XDP is enabled since
         * XDP programs might manipulate packets which will render
@@@ -1352,8 -1347,7 +1352,8 @@@ void mlx5e_deactivate_rq(struct mlx5e_r
  
  void mlx5e_close_rq(struct mlx5e_rq *rq)
  {
 -      cancel_work_sync(&rq->dim.work);
 +      if (rq->dim)
 +              cancel_work_sync(&rq->dim->work);
        cancel_work_sync(&rq->recover_work);
        mlx5e_destroy_rq(rq);
        mlx5e_free_rx_descs(rq);
@@@ -1629,6 -1623,9 +1629,6 @@@ static int mlx5e_alloc_txqsq(struct mlx
        if (err)
                goto err_sq_wq_destroy;
  
 -      INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
 -      sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
 -
        return 0;
  
  err_sq_wq_destroy:
  
  void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
  {
 +      kvfree(sq->dim);
        mlx5e_free_txqsq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
  }
@@@ -1795,27 -1791,11 +1795,27 @@@ int mlx5e_open_txqsq(struct mlx5e_chann
        if (tx_rate)
                mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);
  
 -      if (params->tx_dim_enabled)
 -              sq->state |= BIT(MLX5E_SQ_STATE_DIM);
 +      if (sq->channel && !params->tx_dim_enabled) {
 +              sq->channel->tx_cq_moder = params->tx_cq_moderation;
 +      } else if (sq->channel) {
 +              u8 cq_period_mode;
 +
 +              cq_period_mode = params->tx_moder_use_cqe_mode ?
 +                                       DIM_CQ_PERIOD_MODE_START_FROM_CQE :
 +                                       DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 +              mlx5e_reset_tx_moderation(&sq->channel->tx_cq_moder,
 +                                        cq_period_mode,
 +                                        params->tx_dim_enabled);
 +
 +              err = mlx5e_dim_tx_change(sq, params->tx_dim_enabled);
 +              if (err)
 +                      goto err_destroy_sq;
 +      }
  
        return 0;
  
 +err_destroy_sq:
 +      mlx5e_destroy_sq(c->mdev, sq->sqn);
  err_free_txqsq:
        mlx5e_free_txqsq(sq);
  
@@@ -1867,8 -1847,7 +1867,8 @@@ void mlx5e_close_txqsq(struct mlx5e_txq
        struct mlx5_core_dev *mdev = sq->mdev;
        struct mlx5_rate_limit rl = {0};
  
 -      cancel_work_sync(&sq->dim.work);
 +      if (sq->dim)
 +              cancel_work_sync(&sq->dim->work);
        cancel_work_sync(&sq->recover_work);
        mlx5e_destroy_sq(mdev, sq->sqn);
        if (sq->rate_limit) {
@@@ -1887,49 -1866,6 +1887,49 @@@ void mlx5e_tx_err_cqe_work(struct work_
        mlx5e_reporter_tx_err_cqe(sq);
  }
  
 +static struct dim_cq_moder mlx5e_get_def_tx_moderation(u8 cq_period_mode)
 +{
 +      return (struct dim_cq_moder) {
 +              .cq_period_mode = cq_period_mode,
 +              .pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS,
 +              .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
 +                              MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE :
 +                              MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC,
 +      };
 +}
 +
 +bool mlx5e_reset_tx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
 +                             bool dim_enabled)
 +{
 +      bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
 +
 +      if (dim_enabled)
 +              *cq_moder = net_dim_get_def_tx_moderation(cq_period_mode);
 +      else
 +              *cq_moder = mlx5e_get_def_tx_moderation(cq_period_mode);
 +
 +      return reset_needed;
 +}
 +
 +bool mlx5e_reset_tx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
 +                                      bool dim_enabled, bool keep_dim_state)
 +{
 +      bool reset = false;
 +      int i, tc;
 +
 +      for (i = 0; i < chs->num; i++) {
 +              for (tc = 0; tc < mlx5e_get_dcb_num_tc(&chs->params); tc++) {
 +                      if (keep_dim_state)
 +                              dim_enabled = !!chs->c[i]->sq[tc].dim;
 +
 +                      reset |= mlx5e_reset_tx_moderation(&chs->c[i]->tx_cq_moder,
 +                                                         cq_period_mode, dim_enabled);
 +              }
 +      }
 +
 +      return reset;
 +}
 +
  static int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
                            struct mlx5e_sq_param *param, struct mlx5e_icosq *sq,
                            work_func_t recover_work_func)
@@@ -2153,8 -2089,7 +2153,8 @@@ static int mlx5e_create_cq(struct mlx5e
        mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
  
 -      MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
 +      MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(param->cq_period_mode));
 +
        MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
@@@ -2192,10 -2127,8 +2192,10 @@@ int mlx5e_open_cq(struct mlx5_core_dev 
        if (err)
                goto err_free_cq;
  
 -      if (MLX5_CAP_GEN(mdev, cq_moderation))
 -              mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
 +      if (MLX5_CAP_GEN(mdev, cq_moderation) &&
 +          MLX5_CAP_GEN(mdev, cq_period_mode_modify))
 +              mlx5e_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts,
 +                                         mlx5e_cq_period_mode(moder.cq_period_mode));
        return 0;
  
  err_free_cq:
@@@ -2210,40 -2143,6 +2210,40 @@@ void mlx5e_close_cq(struct mlx5e_cq *cq
        mlx5e_free_cq(cq);
  }
  
 +int mlx5e_modify_cq_period_mode(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 +                              u8 cq_period_mode)
 +{
 +      u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
 +      void *cqc;
 +
 +      MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
 +      cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
 +      MLX5_SET(cqc, cqc, cq_period_mode, mlx5e_cq_period_mode(cq_period_mode));
 +      MLX5_SET(modify_cq_in, in,
 +               modify_field_select_resize_field_select.modify_field_select.modify_field_select,
 +               MLX5_CQ_MODIFY_PERIOD_MODE);
 +
 +      return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
 +}
 +
 +int mlx5e_modify_cq_moderation(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
 +                             u16 cq_period, u16 cq_max_count, u8 cq_period_mode)
 +{
 +      u32 in[MLX5_ST_SZ_DW(modify_cq_in)] = {};
 +      void *cqc;
 +
 +      MLX5_SET(modify_cq_in, in, cqn, cq->cqn);
 +      cqc = MLX5_ADDR_OF(modify_cq_in, in, cq_context);
 +      MLX5_SET(cqc, cqc, cq_period, cq_period);
 +      MLX5_SET(cqc, cqc, cq_max_count, cq_max_count);
 +      MLX5_SET(cqc, cqc, cq_period_mode, cq_period_mode);
 +      MLX5_SET(modify_cq_in, in,
 +               modify_field_select_resize_field_select.modify_field_select.modify_field_select,
 +               MLX5_CQ_MODIFY_PERIOD | MLX5_CQ_MODIFY_COUNT | MLX5_CQ_MODIFY_PERIOD_MODE);
 +
 +      return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
 +}
 +
  static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_params *params,
                             struct mlx5e_create_cq_param *ccp,
@@@ -3002,28 -2901,7 +3002,28 @@@ int mlx5e_update_tx_netdev_queues(struc
        return err;
  }
  
 -static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
 +static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
 +                                         struct mlx5e_params *params)
 +{
 +      struct mlx5_core_dev *mdev = priv->mdev;
 +      int num_comp_vectors, ix, irq;
 +
 +      num_comp_vectors = mlx5_comp_vectors_max(mdev);
 +
 +      for (ix = 0; ix < params->num_channels; ix++) {
 +              cpumask_clear(priv->scratchpad.cpumask);
 +
 +              for (irq = ix; irq < num_comp_vectors; irq += params->num_channels) {
 +                      int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
 +
 +                      cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
 +              }
 +
 +              netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
 +      }
 +}
 +
 +static int mlx5e_update_tc_and_tx_queues(struct mlx5e_priv *priv)
  {
        struct netdev_tc_txq old_tc_to_txq[TC_MAX_QUEUE], *tc_to_txq;
        struct net_device *netdev = priv->netdev;
        err = mlx5e_update_tx_netdev_queues(priv);
        if (err)
                goto err_tcs;
 -      err = netif_set_real_num_rx_queues(netdev, nch);
 -      if (err) {
 -              netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
 -              goto err_txqs;
 -      }
 +      mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
  
        return 0;
  
 -err_txqs:
 -      /* netif_set_real_num_rx_queues could fail only when nch increased. Only
 -       * one of nch and ntc is changed in this function. That means, the call
 -       * to netif_set_real_num_tx_queues below should not fail, because it
 -       * decreases the number of TX queues.
 -       */
 -      WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs));
 -
  err_tcs:
        WARN_ON_ONCE(mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc,
                                          old_tc_to_txq));
@@@ -3058,32 -2948,42 +3058,32 @@@ err_out
        return err;
  }
  
 -static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_netdev_queues);
 -
 -static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
 -                                         struct mlx5e_params *params)
 -{
 -      int ix;
 -
 -      for (ix = 0; ix < params->num_channels; ix++) {
 -              int num_comp_vectors, irq, vec_ix;
 -              struct mlx5_core_dev *mdev;
 -
 -              mdev = mlx5_sd_ch_ix_get_dev(priv->mdev, ix);
 -              num_comp_vectors = mlx5_comp_vectors_max(mdev);
 -              cpumask_clear(priv->scratchpad.cpumask);
 -              vec_ix = mlx5_sd_ch_ix_get_vec_ix(mdev, ix);
 -
 -              for (irq = vec_ix; irq < num_comp_vectors; irq += params->num_channels) {
 -                      int cpu = mlx5_comp_vector_get_cpu(mdev, irq);
 -
 -                      cpumask_set_cpu(cpu, priv->scratchpad.cpumask);
 -              }
 -
 -              netif_set_xps_queue(priv->netdev, priv->scratchpad.cpumask, ix);
 -      }
 -}
 +MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_update_tc_and_tx_queues);
  
  static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
  {
        u16 count = priv->channels.params.num_channels;
 +      struct net_device *netdev = priv->netdev;
 +      int old_num_rxqs;
        int err;
  
 -      err = mlx5e_update_netdev_queues(priv);
 -      if (err)
 +      old_num_rxqs = netdev->real_num_rx_queues;
 +      err = netif_set_real_num_rx_queues(netdev, count);
 +      if (err) {
 +              netdev_warn(netdev, "%s: netif_set_real_num_rx_queues failed, %d\n",
 +                          __func__, err);
                return err;
 -
 -      mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params);
 +      }
 +      err = mlx5e_update_tc_and_tx_queues(priv);
 +      if (err) {
 +              /* mlx5e_update_tc_and_tx_queues can fail only when the number of
 +               * channels or TCs increases. Since it is the channel count that
 +               * changed here, it increased. That means the call to
 +               * netif_set_real_num_rx_queues below should not fail, because it
 +               * decreases the number of RX queues.
 +               */
 +              WARN_ON_ONCE(netif_set_real_num_rx_queues(netdev, old_num_rxqs));
 +              return err;
 +      }
  
        /* This function may be called on attach, before priv->rx_res is created. */
        if (priv->rx_res) {
@@@ -3616,7 -3516,7 +3616,7 @@@ static int mlx5e_setup_tc_mqprio_dcb(st
        mlx5e_params_mqprio_dcb_set(&new_params, tc ? tc : 1);
  
        err = mlx5e_safe_switch_params(priv, &new_params,
 -                                     mlx5e_num_channels_changed_ctx, NULL, true);
 +                                     mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
  
        if (!err && priv->mqprio_rl) {
                mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
@@@ -3717,8 -3617,10 +3717,8 @@@ static struct mlx5e_mqprio_rl *mlx5e_mq
  static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
                                         struct tc_mqprio_qopt_offload *mqprio)
  {
 -      mlx5e_fp_preactivate preactivate;
        struct mlx5e_params new_params;
        struct mlx5e_mqprio_rl *rl;
 -      bool nch_changed;
        int err;
  
        err = mlx5e_mqprio_channel_validate(priv, mqprio);
        new_params = priv->channels.params;
        mlx5e_params_mqprio_channel_set(&new_params, mqprio, rl);
  
 -      nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
 -      preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
 -              mlx5e_update_netdev_queues_ctx;
 -      err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
 +      err = mlx5e_safe_switch_params(priv, &new_params,
 +                                     mlx5e_update_tc_and_tx_queues_ctx, NULL, true);
        if (err) {
                if (rl) {
                        mlx5e_mqprio_rl_cleanup(rl);
@@@ -4056,47 -3960,6 +4056,47 @@@ static int set_feature_rx_all(struct ne
        return mlx5_set_port_fcs(mdev, !enable);
  }
  
 +static struct dim_cq_moder mlx5e_get_def_rx_moderation(u8 cq_period_mode)
 +{
 +      return (struct dim_cq_moder) {
 +              .cq_period_mode = cq_period_mode,
 +              .pkts = MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS,
 +              .usec = cq_period_mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE ?
 +                              MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE :
 +                              MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC,
 +      };
 +}
 +
 +bool mlx5e_reset_rx_moderation(struct dim_cq_moder *cq_moder, u8 cq_period_mode,
 +                             bool dim_enabled)
 +{
 +      bool reset_needed = cq_moder->cq_period_mode != cq_period_mode;
 +
 +      if (dim_enabled)
 +              *cq_moder = net_dim_get_def_rx_moderation(cq_period_mode);
 +      else
 +              *cq_moder = mlx5e_get_def_rx_moderation(cq_period_mode);
 +
 +      return reset_needed;
 +}
 +
 +bool mlx5e_reset_rx_channels_moderation(struct mlx5e_channels *chs, u8 cq_period_mode,
 +                                      bool dim_enabled, bool keep_dim_state)
 +{
 +      bool reset = false;
 +      int i;
 +
 +      for (i = 0; i < chs->num; i++) {
 +              if (keep_dim_state)
 +                      dim_enabled = !!chs->c[i]->rq.dim;
 +
 +              reset |= mlx5e_reset_rx_moderation(&chs->c[i]->rx_cq_moder,
 +                                                 cq_period_mode, dim_enabled);
 +      }
 +
 +      return reset;
 +}
 +
  static int mlx5e_set_rx_port_ts(struct mlx5_core_dev *mdev, bool enable)
  {
        u32 in[MLX5_ST_SZ_DW(pcmr_reg)] = {};
@@@ -4520,7 -4383,7 +4520,7 @@@ int mlx5e_change_mtu(struct net_device 
        err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset);
  
  out:
 -      netdev->mtu = params->sw_mtu;
 +      WRITE_ONCE(netdev->mtu, params->sw_mtu);
        mutex_unlock(&priv->state_lock);
        return err;
  }
@@@ -5087,7 -4950,10 +5087,7 @@@ static int mlx5e_bridge_setlink(struct 
        if (!br_spec)
                return -EINVAL;
  
 -      nla_for_each_nested(attr, br_spec, rem) {
 -              if (nla_type(attr) != IFLA_BRIDGE_MODE)
 -                      continue;
 -
 +      nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) {
                mode = nla_get_u16(attr);
                if (mode > BRIDGE_MODE_VEPA)
                        return -EINVAL;
@@@ -5161,6 -5027,7 +5161,6 @@@ void mlx5e_build_nic_params(struct mlx5
  {
        struct mlx5e_params *params = &priv->channels.params;
        struct mlx5_core_dev *mdev = priv->mdev;
 -      u8 rx_cq_period_mode;
  
        params->sw_mtu = mtu;
        params->hard_mtu = MLX5E_ETH_HARD_MTU;
        params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
  
        /* CQ moderation params */
 -      rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
 -                      MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
 -                      MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
 -      params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
 -      params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
 -      mlx5e_set_rx_cq_mode_params(params, rx_cq_period_mode);
 -      mlx5e_set_tx_cq_mode_params(params, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
 +      params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
 +                               MLX5_CAP_GEN(mdev, cq_period_mode_modify);
 +      params->tx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation) &&
 +                               MLX5_CAP_GEN(mdev, cq_period_mode_modify);
 +      params->rx_moder_use_cqe_mode = !!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe);
 +      params->tx_moder_use_cqe_mode = false;
 +      mlx5e_reset_rx_moderation(&params->rx_cq_moderation, params->rx_moder_use_cqe_mode,
 +                                params->rx_dim_enabled);
 +      mlx5e_reset_tx_moderation(&params->tx_cq_moderation, params->tx_moder_use_cqe_mode,
 +                                params->tx_dim_enabled);
  
        /* TX inline */
        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
@@@ -5701,7 -5565,7 +5701,7 @@@ static void mlx5e_nic_disable(struct ml
        mlx5e_ipsec_cleanup(priv);
  }
  
 -int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
 +static int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
  {
        return mlx5e_refresh_tirs(priv, false, false);
  }
@@@ -6194,7 -6058,7 +6194,7 @@@ static int mlx5e_resume(struct auxiliar
        return 0;
  }
  
- static int _mlx5e_suspend(struct auxiliary_device *adev)
+ static int _mlx5e_suspend(struct auxiliary_device *adev, bool pre_netdev_reg)
  {
        struct mlx5e_dev *mlx5e_dev = auxiliary_get_drvdata(adev);
        struct mlx5e_priv *priv = mlx5e_dev->priv;
        struct mlx5_core_dev *pos;
        int i;
  
-       if (!netif_device_present(netdev)) {
+       if (!pre_netdev_reg && !netif_device_present(netdev)) {
                if (test_bit(MLX5E_STATE_DESTROYING, &priv->state))
                        mlx5_sd_for_each_dev(i, mdev, pos)
                                mlx5e_destroy_mdev_resources(pos);
@@@ -6226,7 -6090,7 +6226,7 @@@ static int mlx5e_suspend(struct auxilia
  
        actual_adev = mlx5_sd_get_adev(mdev, adev, edev->idx);
        if (actual_adev)
-               err = _mlx5e_suspend(actual_adev);
+               err = _mlx5e_suspend(actual_adev, false);
  
        mlx5_sd_cleanup(mdev);
        return err;
@@@ -6293,7 -6157,7 +6293,7 @@@ static int _mlx5e_probe(struct auxiliar
        return 0;
  
  err_resume:
-       _mlx5e_suspend(adev);
+       _mlx5e_suspend(adev, true);
  err_profile_cleanup:
        profile->cleanup(priv);
  err_destroy_netdev:
@@@ -6333,7 -6197,7 +6333,7 @@@ static void _mlx5e_remove(struct auxili
        mlx5_core_uplink_netdev_set(mdev, NULL);
        mlx5e_dcbnl_delete_app(priv);
        unregister_netdev(priv->netdev);
-       _mlx5e_suspend(adev);
+       _mlx5e_suspend(adev, false);
        priv->profile->cleanup(priv);
        mlx5e_destroy_netdev(priv);
        mlx5e_devlink_port_unregister(mlx5e_dev);
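
For context on the XPS hunk in en_main.c above: the reworked mlx5e_set_default_xps_cpumasks() assigns completion vectors to channels round-robin, so channel ix collects vectors ix, ix + num_channels, ix + 2*num_channels, and so on, and the resulting CPU mask is handed to netif_set_xps_queue(). The following is only a minimal standalone C sketch of that mapping; the vector-to-CPU lookup is stubbed out (the driver asks mlx5_comp_vector_get_cpu()), and the counts are made up.

#include <stdio.h>

/* Stand-in for mlx5_comp_vector_get_cpu(); assumes vector i is pinned to CPU i. */
static int comp_vector_cpu(int irq) { return irq; }

int main(void)
{
	int num_comp_vectors = 8;   /* e.g. what mlx5_comp_vectors_max() returns */
	int num_channels = 3;       /* params->num_channels */

	for (int ix = 0; ix < num_channels; ix++) {
		printf("channel %d -> CPUs:", ix);
		for (int irq = ix; irq < num_comp_vectors; irq += num_channels)
			printf(" %d", comp_vector_cpu(irq));
		printf("\n");   /* this set is what netif_set_xps_queue() receives */
	}
	return 0;
}
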
index 50ce1ea20dd493a40ffb07c8b9be697148896c65,ef55674876cb48be6e034b60df6046f54d4b6c21..88745dc6aed5b7c6f84cf574e90cef4c4e00ae63
@@@ -573,13 -573,6 +573,13 @@@ int mlx5_devlink_port_fn_ipsec_packet_g
  int mlx5_devlink_port_fn_ipsec_packet_set(struct devlink_port *port, bool enable,
                                          struct netlink_ext_ack *extack);
  #endif /* CONFIG_XFRM_OFFLOAD */
 +int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port,
 +                                      u32 *max_io_eqs,
 +                                      struct netlink_ext_ack *extack);
 +int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port,
 +                                      u32 max_io_eqs,
 +                                      struct netlink_ext_ack *extack);
 +
  void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
  
  int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
@@@ -840,7 -833,7 +840,7 @@@ int mlx5_eswitch_offloads_single_fdb_ad
                                             struct mlx5_eswitch *slave_esw, int max_slaves);
  void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
                                              struct mlx5_eswitch *slave_esw);
- int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
+ int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw);
  
  bool mlx5_eswitch_block_encap(struct mlx5_core_dev *dev);
  void mlx5_eswitch_unblock_encap(struct mlx5_core_dev *dev);
@@@ -932,7 -925,7 +932,7 @@@ mlx5_eswitch_offloads_single_fdb_del_on
  static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }
  
  static inline int
- mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+ mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
  {
        return 0;
  }
index f378b8176e47c5cdac44d9f371fff372d3702792,e8caf12f4c4f83c8225494baa73acf4a99c94a46..592143d5e1da138b9120e37c2a9095fd5fd97d4d
@@@ -67,8 -67,6 +67,8 @@@
  
  #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1)
  
 +#define MLX5_ESW_MAX_CTRL_EQS 4
 +
  static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
        .max_fte = MLX5_ESW_VPORT_TBL_SIZE,
        .max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
@@@ -2413,8 -2411,7 +2413,8 @@@ err
  }
  
  static int esw_port_metadata_set(struct devlink *devlink, u32 id,
 -                               struct devlink_param_gset_ctx *ctx)
 +                               struct devlink_param_gset_ctx *ctx,
 +                               struct netlink_ext_ack *extack)
  {
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
@@@ -2505,6 -2502,16 +2505,16 @@@ void esw_offloads_cleanup(struct mlx5_e
        esw_offloads_cleanup_reps(esw);
  }
  
+ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
+                                  struct mlx5_eswitch_rep *rep, u8 rep_type)
+ {
+       if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
+                          REP_REGISTERED, REP_LOADED) == REP_REGISTERED)
+               return esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
+       return 0;
+ }
+
  static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
                                      struct mlx5_eswitch_rep *rep, u8 rep_type)
  {
@@@ -2529,13 -2536,11 +2539,11 @@@ static int mlx5_esw_offloads_rep_load(s
        int err;
  
        rep = mlx5_eswitch_get_rep(esw, vport_num);
-       for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
-               if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
-                                  REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
-                       err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
-                       if (err)
-                               goto err_reps;
-               }
+       for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+               err = __esw_offloads_load_rep(esw, rep, rep_type);
+               if (err)
+                       goto err_reps;
+       }
  
        return 0;
  
@@@ -3280,7 -3285,7 +3288,7 @@@ static void esw_destroy_offloads_acl_ta
                esw_vport_destroy_offloads_acl_tables(esw, vport);
  }
  
- int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
+ int mlx5_eswitch_reload_ib_reps(struct mlx5_eswitch *esw)
  {
        struct mlx5_eswitch_rep *rep;
        unsigned long i;
        if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
                return 0;
  
-       ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
+       ret = __esw_offloads_load_rep(esw, rep, REP_IB);
        if (ret)
                return ret;
  
        mlx5_esw_for_each_rep(esw, i, rep) {
                if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
-                       mlx5_esw_offloads_rep_load(esw, rep->vport);
+                       __esw_offloads_load_rep(esw, rep, REP_IB);
        }
  
        return 0;
@@@ -4571,98 -4576,3 +4579,98 @@@ unlock
        return err;
  }
  #endif /* CONFIG_XFRM_OFFLOAD */
 +
 +int
 +mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, u32 *max_io_eqs,
 +                                  struct netlink_ext_ack *extack)
 +{
 +      struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
 +      int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
 +      u16 vport_num = vport->vport;
 +      struct mlx5_eswitch *esw;
 +      void *query_ctx;
 +      void *hca_caps;
 +      u32 max_eqs;
 +      int err;
 +
 +      esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
 +      if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
 +              NL_SET_ERR_MSG_MOD(extack,
 +                                 "Device doesn't support VHCA management");
 +              return -EOPNOTSUPP;
 +      }
 +
 +      query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
 +      if (!query_ctx)
 +              return -ENOMEM;
 +
 +      mutex_lock(&esw->state_lock);
 +      err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
 +                                          MLX5_CAP_GENERAL);
 +      if (err) {
 +              NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
 +              goto out;
 +      }
 +
 +      hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
 +      max_eqs = MLX5_GET(cmd_hca_cap, hca_caps, max_num_eqs);
 +      if (max_eqs < MLX5_ESW_MAX_CTRL_EQS)
 +              *max_io_eqs = 0;
 +      else
 +              *max_io_eqs = max_eqs - MLX5_ESW_MAX_CTRL_EQS;
 +out:
 +      mutex_unlock(&esw->state_lock);
 +      kfree(query_ctx);
 +      return err;
 +}
 +
 +int
 +mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs,
 +                                  struct netlink_ext_ack *extack)
 +{
 +      struct mlx5_vport *vport = mlx5_devlink_port_vport_get(port);
 +      int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
 +      u16 vport_num = vport->vport;
 +      struct mlx5_eswitch *esw;
 +      void *query_ctx;
 +      void *hca_caps;
 +      u16 max_eqs;
 +      int err;
 +
 +      esw = mlx5_devlink_eswitch_nocheck_get(port->devlink);
 +      if (!MLX5_CAP_GEN(esw->dev, vhca_resource_manager)) {
 +              NL_SET_ERR_MSG_MOD(extack,
 +                                 "Device doesn't support VHCA management");
 +              return -EOPNOTSUPP;
 +      }
 +
 +      if (check_add_overflow(max_io_eqs, MLX5_ESW_MAX_CTRL_EQS, &max_eqs)) {
 +              NL_SET_ERR_MSG_MOD(extack, "Supplied value out of range");
 +              return -EINVAL;
 +      }
 +
 +      query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
 +      if (!query_ctx)
 +              return -ENOMEM;
 +
 +      mutex_lock(&esw->state_lock);
 +      err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx,
 +                                          MLX5_CAP_GENERAL);
 +      if (err) {
 +              NL_SET_ERR_MSG_MOD(extack, "Failed getting HCA caps");
 +              goto out;
 +      }
 +
 +      hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
 +      MLX5_SET(cmd_hca_cap, hca_caps, max_num_eqs, max_eqs);
 +
 +      err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
 +                                          MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
 +      if (err)
 +              NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps");
 +
 +out:
 +      mutex_unlock(&esw->state_lock);
 +      kfree(query_ctx);
 +      return err;
 +}
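
As a sanity check on the max_io_eqs handlers added above: the get path reports the HCA cap max_num_eqs minus the MLX5_ESW_MAX_CTRL_EQS (4) EQs reserved for control, and the set path adds the reservation back with overflow checking before writing the cap. A small standalone C sketch of that arithmetic follows; it is illustrative only and the numbers are invented.

#include <stdint.h>
#include <stdio.h>

#define ESW_MAX_CTRL_EQS 4U   /* mirrors MLX5_ESW_MAX_CTRL_EQS */

/* What the get handler reports for a queried HCA cap value. */
static uint32_t io_eqs_get(uint32_t max_num_eqs)
{
	return max_num_eqs < ESW_MAX_CTRL_EQS ? 0 : max_num_eqs - ESW_MAX_CTRL_EQS;
}

/* What the set handler writes back into the cap; -1 on u16 overflow
 * (the patch uses check_add_overflow() for the same purpose). */
static int io_eqs_set(uint32_t max_io_eqs, uint16_t *max_num_eqs)
{
	uint32_t sum = max_io_eqs + ESW_MAX_CTRL_EQS;

	if (sum > UINT16_MAX)
		return -1;
	*max_num_eqs = (uint16_t)sum;
	return 0;
}

int main(void)
{
	uint16_t cap;

	printf("cap 260 -> user sees %u I/O EQs\n", io_eqs_get(260));  /* 256 */
	if (!io_eqs_set(256, &cap))
		printf("user sets 256 -> cap written as %u\n", cap);   /* 260 */
	return 0;
}
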
index 5e2171ff0a8940239d659a61b82af3206481d26c,37598d116f3b832f73c34c85821cc1dba4566c0d..f7f0476a4a58d350b52b9966a0efaa0d6964a5a8
@@@ -713,6 -713,7 +713,6 @@@ int mlx5_deactivate_lag(struct mlx5_la
        return 0;
  }
  
 -#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 4
  bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
  {
  #ifdef CONFIG_MLX5_ESWITCH
                if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
                        return false;
  
 -      if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports > MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
 -              return false;
  #else
        for (i = 0; i < ldev->ports; i++)
                if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
@@@ -811,7 -814,7 +811,7 @@@ void mlx5_disable_lag(struct mlx5_lag *
        if (shared_fdb)
                for (i = 0; i < ldev->ports; i++)
                        if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
-                               mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+                               mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
  }
  
  static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
@@@ -919,7 -922,7 +919,7 @@@ static void mlx5_do_bond(struct mlx5_la
                        mlx5_rescan_drivers_locked(dev0);
  
                        for (i = 0; i < ldev->ports; i++) {
-                               err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+                               err = mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
                                if (err)
                                        break;
                        }
                                mlx5_deactivate_lag(ldev);
                                mlx5_lag_add_devices(ldev);
                                for (i = 0; i < ldev->ports; i++)
-                                       mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+                                       mlx5_eswitch_reload_ib_reps(ldev->pf[i].dev->priv.eswitch);
                                mlx5_core_err(dev0, "Failed to enable lag\n");
                                return;
                        }
index 4aa356e2f29933f1c36dc043e0b47660a4b0a2d0,d3a2fbb14140e90aabea9cdcd8a1b69fed2a21ab..3e5f9b17c77767f918f8254b97de38be36d88d28
@@@ -949,17 -949,6 +949,6 @@@ static irqreturn_t gem_interrupt(int ir
        return IRQ_HANDLED;
  }
  
- #ifdef CONFIG_NET_POLL_CONTROLLER
- static void gem_poll_controller(struct net_device *dev)
- {
-       struct gem *gp = netdev_priv(dev);
-       disable_irq(gp->pdev->irq);
-       gem_interrupt(gp->pdev->irq, dev);
-       enable_irq(gp->pdev->irq);
- }
- #endif
  static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue)
  {
        struct gem *gp = netdev_priv(dev);
@@@ -2499,7 -2488,7 +2488,7 @@@ static int gem_change_mtu(struct net_de
  {
        struct gem *gp = netdev_priv(dev);
  
 -      dev->mtu = new_mtu;
 +      WRITE_ONCE(dev->mtu, new_mtu);
  
        /* We'll just catch it later when the device is up'd or resumed */
        if (!netif_running(dev) || !netif_device_present(dev))
@@@ -2839,9 -2828,6 +2828,6 @@@ static const struct net_device_ops gem_
        .ndo_change_mtu         = gem_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = gem_set_mac_address,
- #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = gem_poll_controller,
- #endif
  };
  
  static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --combined drivers/net/phy/micrel.c
index 2d11f38cbc243e5b45fd3656bc8cf3fa7eafdcf5,87780465cd0d59fcbd18eb6b7152a0e42d8e90a3..13e30ea7eec5d1e8cbfca6811d09a347660e06dd
  #define PTP_CMD_CTL_PTP_LTC_STEP_SEC_         BIT(5)
  #define PTP_CMD_CTL_PTP_LTC_STEP_NSEC_                BIT(6)
  
 +#define PTP_COMMON_INT_ENA                    0x0204
 +#define PTP_COMMON_INT_ENA_GPIO_CAP_EN                BIT(2)
 +
  #define PTP_CLOCK_SET_SEC_HI                  0x0205
  #define PTP_CLOCK_SET_SEC_MID                 0x0206
  #define PTP_CLOCK_SET_SEC_LO                  0x0207
  #define PTP_CLOCK_READ_NS_HI                  0x022C
  #define PTP_CLOCK_READ_NS_LO                  0x022D
  
 +#define PTP_GPIO_SEL                          0x0230
 +#define PTP_GPIO_SEL_GPIO_SEL(pin)            ((pin) << 8)
 +#define PTP_GPIO_CAP_MAP_LO                   0x0232
 +
 +#define PTP_GPIO_CAP_EN                               0x0233
 +#define PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(gpio)  BIT(gpio)
 +#define PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(gpio)  (BIT(gpio) << 8)
 +
 +#define PTP_GPIO_RE_LTC_SEC_HI_CAP            0x0235
 +#define PTP_GPIO_RE_LTC_SEC_LO_CAP            0x0236
 +#define PTP_GPIO_RE_LTC_NS_HI_CAP             0x0237
 +#define PTP_GPIO_RE_LTC_NS_LO_CAP             0x0238
 +#define PTP_GPIO_FE_LTC_SEC_HI_CAP            0x0239
 +#define PTP_GPIO_FE_LTC_SEC_LO_CAP            0x023A
 +#define PTP_GPIO_FE_LTC_NS_HI_CAP             0x023B
 +#define PTP_GPIO_FE_LTC_NS_LO_CAP             0x023C
 +
 +#define PTP_GPIO_CAP_STS                      0x023D
 +#define PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(gpio)        BIT(gpio)
 +#define PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(gpio)        (BIT(gpio) << 8)
 +
  #define PTP_OPERATING_MODE                    0x0241
  #define PTP_OPERATING_MODE_STANDALONE_                BIT(0)
  
  #define PS_TO_REG                             200
  #define FIFO_SIZE                             8
  
 +#define LAN8814_PTP_GPIO_NUM                  24
 +#define LAN8814_PTP_PEROUT_NUM                        2
 +#define LAN8814_PTP_EXTTS_NUM                 3
 +
 +#define LAN8814_BUFFER_TIME                   2
 +
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_200MS    13
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100MS    12
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50MS     11
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10MS     10
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5MS      9
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1MS      8
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500US    7
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100US    6
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50US     5
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10US     4
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5US      3
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1US      2
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500NS    1
 +#define LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS    0
 +
 +#define LAN8814_GPIO_EN1                      0x20
 +#define LAN8814_GPIO_EN2                      0x21
 +#define LAN8814_GPIO_DIR1                     0x22
 +#define LAN8814_GPIO_DIR2                     0x23
 +#define LAN8814_GPIO_BUF1                     0x24
 +#define LAN8814_GPIO_BUF2                     0x25
 +
 +#define LAN8814_GPIO_EN_ADDR(pin) \
 +      ((pin) > 15 ? LAN8814_GPIO_EN1 : LAN8814_GPIO_EN2)
 +#define LAN8814_GPIO_EN_BIT(pin)              BIT(pin)
 +#define LAN8814_GPIO_DIR_ADDR(pin) \
 +      ((pin) > 15 ? LAN8814_GPIO_DIR1 : LAN8814_GPIO_DIR2)
 +#define LAN8814_GPIO_DIR_BIT(pin)             BIT(pin)
 +#define LAN8814_GPIO_BUF_ADDR(pin) \
 +      ((pin) > 15 ? LAN8814_GPIO_BUF1 : LAN8814_GPIO_BUF2)
 +#define LAN8814_GPIO_BUF_BIT(pin)             BIT(pin)
 +
 +#define LAN8814_EVENT_A                               0
 +#define LAN8814_EVENT_B                               1
 +
 +#define LAN8814_PTP_GENERAL_CONFIG            0x0201
 +#define LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event) \
 +      ((event) ? GENMASK(11, 8) : GENMASK(7, 4))
 +#define LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, value) \
 +      (((value) & GENMASK(3, 0)) << (4 + ((event) << 2)))
 +#define LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event) \
 +      ((event) ? BIT(2) : BIT(0))
 +#define LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event) \
 +      ((event) ? BIT(3) : BIT(1))
 +
 +#define LAN8814_PTP_CLOCK_TARGET_SEC_HI(event)        ((event) ? 0x21F : 0x215)
 +#define LAN8814_PTP_CLOCK_TARGET_SEC_LO(event)        ((event) ? 0x220 : 0x216)
 +#define LAN8814_PTP_CLOCK_TARGET_NS_HI(event) ((event) ? 0x221 : 0x217)
 +#define LAN8814_PTP_CLOCK_TARGET_NS_LO(event) ((event) ? 0x222 : 0x218)
 +
 +#define LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event) ((event) ? 0x223 : 0x219)
 +#define LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event) ((event) ? 0x224 : 0x21A)
 +#define LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event)  ((event) ? 0x225 : 0x21B)
 +#define LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event)  ((event) ? 0x226 : 0x21C)
 +
  /* Delay used to get the second part from the LTC */
  #define LAN8841_GET_SEC_LTC_DELAY             (500 * NSEC_PER_MSEC)
  
@@@ -389,9 -304,13 +389,9 @@@ struct lan8814_shared_priv 
        struct phy_device *phydev;
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_clock_info;
 +      struct ptp_pin_desc *pin_config;
  
 -      /* Reference counter to how many ports in the package are enabling the
 -       * timestamping
 -       */
 -      u8 ref;
 -
 -      /* Lock for ptp_clock and ref */
 +      /* Lock for ptp_clock */
        struct mutex shared_lock;
  };
  
@@@ -2507,6 -2426,8 +2507,6 @@@ static int lan8814_hwtstamp(struct mii_
  {
        struct kszphy_ptp_priv *ptp_priv =
                          container_of(mii_ts, struct kszphy_ptp_priv, mii_ts);
 -      struct phy_device *phydev = ptp_priv->phydev;
 -      struct lan8814_shared_priv *shared = phydev->shared->priv;
        struct lan8814_ptp_rx_ts *rx_ts, *tmp;
        int txcfg = 0, rxcfg = 0;
        int pkt_ts_enable;
        else
                lan8814_config_ts_intr(ptp_priv->phydev, false);
  
 -      mutex_lock(&shared->shared_lock);
 -      if (config->rx_filter != HWTSTAMP_FILTER_NONE)
 -              shared->ref++;
 -      else
 -              shared->ref--;
 -
 -      if (shared->ref)
 -              lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
 -                                    PTP_CMD_CTL_PTP_ENABLE_);
 -      else
 -              lanphy_write_page_reg(ptp_priv->phydev, 4, PTP_CMD_CTL,
 -                                    PTP_CMD_CTL_PTP_DISABLE_);
 -      mutex_unlock(&shared->shared_lock);
 -
        /* In case of multiple starts and stops, these needs to be cleared */
        list_for_each_entry_safe(rx_ts, tmp, &ptp_priv->rx_ts_list, list) {
                list_del(&rx_ts->list);
@@@ -2742,29 -2677,6 +2742,29 @@@ static int lan8814_ptpci_settime64(stru
        return 0;
  }
  
 +static void lan8814_ptp_set_target(struct phy_device *phydev, int event,
 +                                 s64 start_sec, u32 start_nsec)
 +{
 +      /* Set the start time */
 +      lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_LO(event),
 +                            lower_16_bits(start_sec));
 +      lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_SEC_HI(event),
 +                            upper_16_bits(start_sec));
 +
 +      lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_LO(event),
 +                            lower_16_bits(start_nsec));
 +      lanphy_write_page_reg(phydev, 4, LAN8814_PTP_CLOCK_TARGET_NS_HI(event),
 +                            upper_16_bits(start_nsec) & 0x3fff);
 +}
 +
 +static void lan8814_ptp_update_target(struct phy_device *phydev, time64_t sec)
 +{
 +      lan8814_ptp_set_target(phydev, LAN8814_EVENT_A,
 +                             sec + LAN8814_BUFFER_TIME, 0);
 +      lan8814_ptp_set_target(phydev, LAN8814_EVENT_B,
 +                             sec + LAN8814_BUFFER_TIME, 0);
 +}
 +
  static void lan8814_ptp_clock_step(struct phy_device *phydev,
                                   s64 time_step_ns)
  {
                        nano_seconds -= 1000000000;
                }
                lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
 +              lan8814_ptp_update_target(phydev, set_seconds);
                return;
        } else if (time_step_ns < -15000000000LL) {
                /* convert to clock set */
                }
                nano_seconds -= nano_seconds_step;
                lan8814_ptp_clock_set(phydev, set_seconds, nano_seconds);
 +              lan8814_ptp_update_target(phydev, set_seconds);
                return;
        }
  
        }
  
        while (seconds) {
 +              u32 nsec;
 +
                if (seconds > 0) {
                        u32 adjustment_value = (u32)seconds;
                        u16 adjustment_value_lo, adjustment_value_hi;
                                              PTP_LTC_STEP_ADJ_DIR_ |
                                              adjustment_value_hi);
                        seconds -= ((s32)adjustment_value);
 +
 +                      lan8814_ptp_clock_get(phydev, &set_seconds, &nsec);
 +                      set_seconds -= adjustment_value;
 +                      lan8814_ptp_update_target(phydev, set_seconds);
                } else {
                        u32 adjustment_value = (u32)(-seconds);
                        u16 adjustment_value_lo, adjustment_value_hi;
                        lanphy_write_page_reg(phydev, 4, PTP_LTC_STEP_ADJ_HI,
                                              adjustment_value_hi);
                        seconds += ((s32)adjustment_value);
 +
 +                      lan8814_ptp_clock_get(phydev, &set_seconds, &nsec);
 +                      set_seconds += adjustment_value;
 +                      lan8814_ptp_update_target(phydev, set_seconds);
                }
                lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL,
                                      PTP_CMD_CTL_PTP_LTC_STEP_SEC_);
@@@ -2945,335 -2845,6 +2945,335 @@@ static int lan8814_ptpci_adjfine(struc
        return 0;
  }
  
 +static void lan8814_ptp_set_reload(struct phy_device *phydev, int event,
 +                                 s64 period_sec, u32 period_nsec)
 +{
 +      lanphy_write_page_reg(phydev, 4,
 +                            LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_LO(event),
 +                            lower_16_bits(period_sec));
 +      lanphy_write_page_reg(phydev, 4,
 +                            LAN8814_PTP_CLOCK_TARGET_RELOAD_SEC_HI(event),
 +                            upper_16_bits(period_sec));
 +
 +      lanphy_write_page_reg(phydev, 4,
 +                            LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_LO(event),
 +                            lower_16_bits(period_nsec));
 +      lanphy_write_page_reg(phydev, 4,
 +                            LAN8814_PTP_CLOCK_TARGET_RELOAD_NS_HI(event),
 +                            upper_16_bits(period_nsec) & 0x3fff);
 +}
 +
 +static void lan8814_ptp_enable_event(struct phy_device *phydev, int event,
 +                                   int pulse_width)
 +{
 +      u16 val;
 +
 +      val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
 +      /* Set the pulse width of the event */
 +      val &= ~(LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_MASK(event));
 +      /* Make sure that the target clock will be incremented each time the
 +       * local time reaches or passes it
 +       */
 +      val |= LAN8814_PTP_GENERAL_CONFIG_LTC_EVENT_SET(event, pulse_width);
 +      val &= ~(LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event));
 +      /* Set the polarity high */
 +      val |= LAN8814_PTP_GENERAL_CONFIG_POLARITY_X(event);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
 +}
 +
 +static void lan8814_ptp_disable_event(struct phy_device *phydev, int event)
 +{
 +      u16 val;
 +
 +      /* Set the target too far in the future, effectively disabling it */
 +      lan8814_ptp_set_target(phydev, event, 0xFFFFFFFF, 0);
 +
 +      /* And then reload once it reaches the target */
 +      val = lanphy_read_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG);
 +      val |= LAN8814_PTP_GENERAL_CONFIG_RELOAD_ADD_X(event);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_PTP_GENERAL_CONFIG, val);
 +}
 +
 +static void lan8814_ptp_perout_off(struct phy_device *phydev, int pin)
 +{
 +      u16 val;
 +
 +      /* Disable gpio alternate function,
 +       * 1: select as gpio,
 +       * 0: select alt func
 +       */
 +      val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
 +      val |= LAN8814_GPIO_EN_BIT(pin);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
 +
 +      val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
 +      val &= ~LAN8814_GPIO_DIR_BIT(pin);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
 +
 +      val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
 +      val &= ~LAN8814_GPIO_BUF_BIT(pin);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
 +}
 +
 +static void lan8814_ptp_perout_on(struct phy_device *phydev, int pin)
 +{
 +      int val;
 +
 +      /* Set as gpio output */
 +      val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
 +      val |= LAN8814_GPIO_DIR_BIT(pin);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), val);
 +
 +      /* Enable gpio 0:for alternate function, 1:gpio */
 +      val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
 +      val &= ~LAN8814_GPIO_EN_BIT(pin);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), val);
 +
 +      /* Set buffer type to push pull */
 +      val = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin));
 +      val |= LAN8814_GPIO_BUF_BIT(pin);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_BUF_ADDR(pin), val);
 +}
 +
 +static int lan8814_ptp_perout(struct ptp_clock_info *ptpci,
 +                            struct ptp_clock_request *rq, int on)
 +{
 +      struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
 +                                                        ptp_clock_info);
 +      struct phy_device *phydev = shared->phydev;
 +      struct timespec64 ts_on, ts_period;
 +      s64 on_nsec, period_nsec;
 +      int pulse_width;
 +      int pin, event;
 +
 +      /* Reject requests with unsupported flags */
 +      if (rq->perout.flags & ~PTP_PEROUT_DUTY_CYCLE)
 +              return -EOPNOTSUPP;
 +
 +      mutex_lock(&shared->shared_lock);
 +      event = rq->perout.index;
 +      pin = ptp_find_pin(shared->ptp_clock, PTP_PF_PEROUT, event);
 +      if (pin < 0 || pin >= LAN8814_PTP_PEROUT_NUM) {
 +              mutex_unlock(&shared->shared_lock);
 +              return -EBUSY;
 +      }
 +
 +      if (!on) {
 +              lan8814_ptp_perout_off(phydev, pin);
 +              lan8814_ptp_disable_event(phydev, event);
 +              mutex_unlock(&shared->shared_lock);
 +              return 0;
 +      }
 +
 +      ts_on.tv_sec = rq->perout.on.sec;
 +      ts_on.tv_nsec = rq->perout.on.nsec;
 +      on_nsec = timespec64_to_ns(&ts_on);
 +
 +      ts_period.tv_sec = rq->perout.period.sec;
 +      ts_period.tv_nsec = rq->perout.period.nsec;
 +      period_nsec = timespec64_to_ns(&ts_period);
 +
 +      if (period_nsec < 200) {
 +              pr_warn_ratelimited("%s: perout period too small, minimum is 200 nsec\n",
 +                                  phydev_name(phydev));
 +              mutex_unlock(&shared->shared_lock);
 +              return -EOPNOTSUPP;
 +      }
 +
 +      if (on_nsec >= period_nsec) {
 +              pr_warn_ratelimited("%s: pulse width must be smaller than period\n",
 +                                  phydev_name(phydev));
 +              mutex_unlock(&shared->shared_lock);
 +              return -EINVAL;
 +      }
 +
 +      switch (on_nsec) {
 +      case 200000000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_200MS;
 +              break;
 +      case 100000000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100MS;
 +              break;
 +      case 50000000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50MS;
 +              break;
 +      case 10000000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10MS;
 +              break;
 +      case 5000000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5MS;
 +              break;
 +      case 1000000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1MS;
 +              break;
 +      case 500000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500US;
 +              break;
 +      case 100000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100US;
 +              break;
 +      case 50000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_50US;
 +              break;
 +      case 10000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_10US;
 +              break;
 +      case 5000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_5US;
 +              break;
 +      case 1000:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_1US;
 +              break;
 +      case 500:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_500NS;
 +              break;
 +      case 100:
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS;
 +              break;
 +      default:
 +              pr_warn_ratelimited("%s: Use default duty cycle of 100ns\n",
 +                                  phydev_name(phydev));
 +              pulse_width = LAN8841_PTP_GENERAL_CONFIG_LTC_EVENT_100NS;
 +              break;
 +      }
 +
 +      /* Configure to pulse every period */
 +      lan8814_ptp_enable_event(phydev, event, pulse_width);
 +      lan8814_ptp_set_target(phydev, event, rq->perout.start.sec,
 +                             rq->perout.start.nsec);
 +      lan8814_ptp_set_reload(phydev, event, rq->perout.period.sec,
 +                             rq->perout.period.nsec);
 +      lan8814_ptp_perout_on(phydev, pin);
 +      mutex_unlock(&shared->shared_lock);
 +
 +      return 0;
 +}
 +
 +static void lan8814_ptp_extts_on(struct phy_device *phydev, int pin, u32 flags)
 +{
 +      u16 tmp;
 +
 +      /* Set as gpio input */
 +      tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
 +      tmp &= ~LAN8814_GPIO_DIR_BIT(pin);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
 +
 +      /* Map the pin to ltc pin 0 of the capture map registers */
 +      tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
 +      tmp |= pin;
 +      lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
 +
 +      /* Enable capture on the edges of the ltc pin */
 +      tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
 +      if (flags & PTP_RISING_EDGE)
 +              tmp |= PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(0);
 +      if (flags & PTP_FALLING_EDGE)
 +              tmp |= PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(0);
 +      lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
 +
 +      /* Enable interrupt top interrupt */
 +      tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
 +      tmp |= PTP_COMMON_INT_ENA_GPIO_CAP_EN;
 +      lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
 +}
 +
 +static void lan8814_ptp_extts_off(struct phy_device *phydev, int pin)
 +{
 +      u16 tmp;
 +
 +      /* Set as gpio out */
 +      tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin));
 +      tmp |= LAN8814_GPIO_DIR_BIT(pin);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_DIR_ADDR(pin), tmp);
 +
 +      /* Enable alternate, 0:for alternate function, 1:gpio */
 +      tmp = lanphy_read_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin));
 +      tmp &= ~LAN8814_GPIO_EN_BIT(pin);
 +      lanphy_write_page_reg(phydev, 4, LAN8814_GPIO_EN_ADDR(pin), tmp);
 +
 +      /* Clear the mapping of pin to registers 0 of the capture registers */
 +      tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO);
 +      tmp &= ~GENMASK(3, 0);
 +      lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_MAP_LO, tmp);
 +
 +      /* Disable capture on both of the edges */
 +      tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_EN);
 +      tmp &= ~PTP_GPIO_CAP_EN_GPIO_RE_CAPTURE_ENABLE(pin);
 +      tmp &= ~PTP_GPIO_CAP_EN_GPIO_FE_CAPTURE_ENABLE(pin);
 +      lanphy_write_page_reg(phydev, 4, PTP_GPIO_CAP_EN, tmp);
 +
 +      /* Disable interrupt top interrupt */
 +      tmp = lanphy_read_page_reg(phydev, 4, PTP_COMMON_INT_ENA);
 +      tmp &= ~PTP_COMMON_INT_ENA_GPIO_CAP_EN;
 +      lanphy_write_page_reg(phydev, 4, PTP_COMMON_INT_ENA, tmp);
 +}
 +
 +static int lan8814_ptp_extts(struct ptp_clock_info *ptpci,
 +                           struct ptp_clock_request *rq, int on)
 +{
 +      struct lan8814_shared_priv *shared = container_of(ptpci, struct lan8814_shared_priv,
 +                                                        ptp_clock_info);
 +      struct phy_device *phydev = shared->phydev;
 +      int pin;
 +
 +      if (rq->extts.flags & ~(PTP_ENABLE_FEATURE |
 +                              PTP_EXTTS_EDGES |
 +                              PTP_STRICT_FLAGS))
 +              return -EOPNOTSUPP;
 +
 +      pin = ptp_find_pin(shared->ptp_clock, PTP_PF_EXTTS,
 +                         rq->extts.index);
 +      if (pin == -1 || pin != LAN8814_PTP_EXTTS_NUM)
 +              return -EINVAL;
 +
 +      mutex_lock(&shared->shared_lock);
 +      if (on)
 +              lan8814_ptp_extts_on(phydev, pin, rq->extts.flags);
 +      else
 +              lan8814_ptp_extts_off(phydev, pin);
 +
 +      mutex_unlock(&shared->shared_lock);
 +
 +      return 0;
 +}
 +
 +static int lan8814_ptpci_enable(struct ptp_clock_info *ptpci,
 +                              struct ptp_clock_request *rq, int on)
 +{
 +      switch (rq->type) {
 +      case PTP_CLK_REQ_PEROUT:
 +              return lan8814_ptp_perout(ptpci, rq, on);
 +      case PTP_CLK_REQ_EXTTS:
 +              return lan8814_ptp_extts(ptpci, rq, on);
 +      default:
 +              return -EINVAL;
 +      }
 +}
 +
 +static int lan8814_ptpci_verify(struct ptp_clock_info *ptp, unsigned int pin,
 +                              enum ptp_pin_function func, unsigned int chan)
 +{
 +      switch (func) {
 +      case PTP_PF_NONE:
 +      case PTP_PF_PEROUT:
 +              /* Only pins 0 and 1 can generate perout signals. And for pin 0
 +               * there is only chan 0 (event A) and for pin 1 there is only
 +               * chan 1 (event B)
 +               */
 +              if (pin >= LAN8814_PTP_PEROUT_NUM || pin != chan)
 +                      return -1;
 +              break;
 +      case PTP_PF_EXTTS:
 +              if (pin != LAN8814_PTP_EXTTS_NUM)
 +                      return -1;
 +              break;
 +      default:
 +              return -1;
 +      }
 +
 +      return 0;
 +}
 +
  static bool lan8814_get_sig_tx(struct sk_buff *skb, u16 *sig)
  {
        struct ptp_header *ptp_header;
@@@ -3439,64 -3010,6 +3439,64 @@@ static void lan8814_handle_ptp_interrup
        }
  }
  
 +static int lan8814_gpio_process_cap(struct lan8814_shared_priv *shared)
 +{
 +      struct phy_device *phydev = shared->phydev;
 +      struct ptp_clock_event ptp_event = {0};
 +      unsigned long nsec;
 +      s64 sec;
 +      u16 tmp;
 +
 +      /* This is 0 because whatever the input pin was, it was mapped to
 +       * ltc gpio pin 0
 +       */
 +      tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_SEL);
 +      tmp |= PTP_GPIO_SEL_GPIO_SEL(0);
 +      lanphy_write_page_reg(phydev, 4, PTP_GPIO_SEL, tmp);
 +
 +      tmp = lanphy_read_page_reg(phydev, 4, PTP_GPIO_CAP_STS);
 +      if (!(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_RE_STS(0)) &&
 +          !(tmp & PTP_GPIO_CAP_STS_PTP_GPIO_FE_STS(0)))
 +              return -1;
 +
 +      if (tmp & BIT(0)) {
 +              sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_HI_CAP);
 +              sec <<= 16;
 +              sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_SEC_LO_CAP);
 +
 +              nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_HI_CAP) & 0x3fff;
 +              nsec <<= 16;
 +              nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
 +      } else {
 +              sec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_HI_CAP);
 +              sec <<= 16;
 +              sec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_SEC_LO_CAP);
 +
 +              nsec = lanphy_read_page_reg(phydev, 4, PTP_GPIO_FE_LTC_NS_HI_CAP) & 0x3fff;
 +              nsec <<= 16;
 +              nsec |= lanphy_read_page_reg(phydev, 4, PTP_GPIO_RE_LTC_NS_LO_CAP);
 +      }
 +
 +      ptp_event.index = 0;
 +      ptp_event.timestamp = ktime_set(sec, nsec);
 +      ptp_event.type = PTP_CLOCK_EXTTS;
 +      ptp_clock_event(shared->ptp_clock, &ptp_event);
 +
 +      return 0;
 +}
 +
 +static int lan8814_handle_gpio_interrupt(struct phy_device *phydev, u16 status)
 +{
 +      struct lan8814_shared_priv *shared = phydev->shared->priv;
 +      int ret;
 +
 +      mutex_lock(&shared->shared_lock);
 +      ret = lan8814_gpio_process_cap(shared);
 +      mutex_unlock(&shared->shared_lock);
 +
 +      return ret;
 +}
 +
  static int lan8804_config_init(struct phy_device *phydev)
  {
        int val;
@@@ -3601,9 -3114,6 +3601,9 @@@ static irqreturn_t lan8814_handle_inter
                ret = IRQ_HANDLED;
        }
  
 +      if (!lan8814_handle_gpio_interrupt(phydev, irq_status))
 +              ret = IRQ_HANDLED;
 +
        return ret;
  }
  
@@@ -3700,39 -3210,19 +3700,39 @@@ static int lan8814_ptp_probe_once(struc
        /* Initialise shared lock for clock */
        mutex_init(&shared->shared_lock);
  
 +      shared->pin_config = devm_kmalloc_array(&phydev->mdio.dev,
 +                                              LAN8814_PTP_GPIO_NUM,
 +                                              sizeof(*shared->pin_config),
 +                                              GFP_KERNEL);
 +      if (!shared->pin_config)
 +              return -ENOMEM;
 +
 +      for (int i = 0; i < LAN8814_PTP_GPIO_NUM; i++) {
 +              struct ptp_pin_desc *ptp_pin = &shared->pin_config[i];
 +
 +              memset(ptp_pin, 0, sizeof(*ptp_pin));
 +              snprintf(ptp_pin->name,
 +                       sizeof(ptp_pin->name), "lan8814_ptp_pin_%02d", i);
 +              ptp_pin->index = i;
 +              ptp_pin->func =  PTP_PF_NONE;
 +      }
 +
        shared->ptp_clock_info.owner = THIS_MODULE;
        snprintf(shared->ptp_clock_info.name, 30, "%s", phydev->drv->name);
        shared->ptp_clock_info.max_adj = 31249999;
        shared->ptp_clock_info.n_alarm = 0;
 -      shared->ptp_clock_info.n_ext_ts = 0;
 -      shared->ptp_clock_info.n_pins = 0;
 +      shared->ptp_clock_info.n_ext_ts = LAN8814_PTP_EXTTS_NUM;
 +      shared->ptp_clock_info.n_pins = LAN8814_PTP_GPIO_NUM;
        shared->ptp_clock_info.pps = 0;
 -      shared->ptp_clock_info.pin_config = NULL;
 +      shared->ptp_clock_info.pin_config = shared->pin_config;
 +      shared->ptp_clock_info.n_per_out = LAN8814_PTP_PEROUT_NUM;
        shared->ptp_clock_info.adjfine = lan8814_ptpci_adjfine;
        shared->ptp_clock_info.adjtime = lan8814_ptpci_adjtime;
        shared->ptp_clock_info.gettime64 = lan8814_ptpci_gettime64;
        shared->ptp_clock_info.settime64 = lan8814_ptpci_settime64;
        shared->ptp_clock_info.getcrosststamp = NULL;
 +      shared->ptp_clock_info.enable = lan8814_ptpci_enable;
 +      shared->ptp_clock_info.verify = lan8814_ptpci_verify;
  
        shared->ptp_clock = ptp_clock_register(&shared->ptp_clock_info,
                                               &phydev->mdio.dev);
        lanphy_write_page_reg(phydev, 4, PTP_OPERATING_MODE,
                              PTP_OPERATING_MODE_STANDALONE_);
  
 +      /* Enable ptp to run LTC clock for ptp and gpio 1PPS operation */
 +      lanphy_write_page_reg(phydev, 4, PTP_CMD_CTL, PTP_CMD_CTL_PTP_ENABLE_);
 +
        return 0;
  }
  
@@@ -5189,7 -4676,8 +5189,8 @@@ static int lan8841_suspend(struct phy_d
        struct kszphy_priv *priv = phydev->priv;
        struct kszphy_ptp_priv *ptp_priv = &priv->ptp_priv;
  
-       ptp_cancel_worker_sync(ptp_priv->ptp_clock);
+       if (ptp_priv->ptp_clock)
+               ptp_cancel_worker_sync(ptp_priv->ptp_clock);
  
        return genphy_suspend(phydev);
  }
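
The LAN8814 perout/extts support added above is exercised through the standard PTP character device. Below is only a minimal user-space sketch under stated assumptions: the PHC node is shown as /dev/ptp0 (path hypothetical, pick the right index on a real system), pin 0 feeds periodic-output channel 0 (event A), and the requested 100 ms high time is one of the pulse widths the driver's switch statement accepts.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	int fd = open("/dev/ptp0", O_RDWR);   /* hypothetical PHC device node */
	if (fd < 0) { perror("open"); return 1; }

	/* Route pin 0 to periodic-output channel 0 (event A). */
	struct ptp_pin_desc pin = { .index = 0, .func = PTP_PF_PEROUT, .chan = 0 };
	if (ioctl(fd, PTP_PIN_SETFUNC2, &pin)) { perror("PTP_PIN_SETFUNC2"); return 1; }

	/* 1 s period with a 100 ms high time via the duty-cycle flag. */
	struct ptp_perout_request req;
	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.period.sec = 1;
	req.flags = PTP_PEROUT_DUTY_CYCLE;
	req.on.nsec = 100000000;              /* matches ..._LTC_EVENT_100MS */
	if (ioctl(fd, PTP_PEROUT_REQUEST2, &req)) { perror("PTP_PEROUT_REQUEST2"); return 1; }

	sleep(10);                            /* observe the signal, then stop */
	memset(&req, 0, sizeof(req));
	ioctl(fd, PTP_PEROUT_REQUEST2, &req); /* a zero period disables the output */
	close(fd);
	return 0;
}
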
index c5196b5313c0bd9141521c289959a38cf070bd2c,56ede5fa02617d727c5a478c763eac3ad26607ce..51c295e1e823ad759f81dfaeba77d9e7d75a5ea3
@@@ -174,6 -174,7 +174,7 @@@ struct ax88179_data 
        u32 wol_supported;
        u32 wolopts;
        u8 disconnecting;
+       u8 initialized;
  };
  
  struct ax88179_int_data {
@@@ -943,7 -944,7 +944,7 @@@ static int ax88179_change_mtu(struct ne
        struct usbnet *dev = netdev_priv(net);
        u16 tmp16;
  
 -      net->mtu = new_mtu;
 +      WRITE_ONCE(net->mtu, new_mtu);
        dev->hard_mtu = net->mtu + net->hard_header_len;
  
        if (net->mtu > 1500) {
@@@ -1277,6 -1278,7 +1278,6 @@@ static void ax88179_get_mac_addr(struc
                        dev->net->addr_assign_type = NET_ADDR_PERM;
        } else {
                netdev_info(dev->net, "invalid MAC address, using random\n");
 -              eth_hw_addr_random(dev->net);
        }
  
        ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, ETH_ALEN,
  static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
  {
        struct ax88179_data *ax179_data;
 +      int ret;
  
 -      usbnet_get_endpoints(dev, intf);
 +      ret = usbnet_get_endpoints(dev, intf);
 +      if (ret < 0)
 +              return ret;
  
        ax179_data = kzalloc(sizeof(*ax179_data), GFP_KERNEL);
        if (!ax179_data)
@@@ -1675,6 -1674,18 +1676,18 @@@ static int ax88179_reset(struct usbnet 
        return 0;
  }
  
+ static int ax88179_net_reset(struct usbnet *dev)
+ {
+       struct ax88179_data *ax179_data = dev->driver_priv;
+
+       if (ax179_data->initialized)
+               ax88179_reset(dev);
+       else
+               ax179_data->initialized = 1;
+
+       return 0;
+ }
+
  static int ax88179_stop(struct usbnet *dev)
  {
        u16 tmp16;
@@@ -1694,6 -1705,7 +1707,7 @@@ static const struct driver_info ax88179
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
+       .reset = ax88179_net_reset,
        .stop = ax88179_stop,
        .flags = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1706,6 -1718,7 +1720,7 @@@ static const struct driver_info ax88178
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
+       .reset = ax88179_net_reset,
        .stop = ax88179_stop,
        .flags = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1718,7 -1731,7 +1733,7 @@@ static const struct driver_info cypress
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset = ax88179_reset,
+       .reset = ax88179_net_reset,
        .stop = ax88179_stop,
        .flags = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1731,7 -1744,7 +1746,7 @@@ static const struct driver_info dlink_d
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset = ax88179_reset,
+       .reset = ax88179_net_reset,
        .stop = ax88179_stop,
        .flags = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1744,7 -1757,7 +1759,7 @@@ static const struct driver_info sitecom
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset = ax88179_reset,
+       .reset = ax88179_net_reset,
        .stop = ax88179_stop,
        .flags = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1757,7 -1770,7 +1772,7 @@@ static const struct driver_info samsung
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset = ax88179_reset,
+       .reset = ax88179_net_reset,
        .stop = ax88179_stop,
        .flags = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1770,7 -1783,7 +1785,7 @@@ static const struct driver_info lenovo_
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset = ax88179_reset,
+       .reset = ax88179_net_reset,
        .stop = ax88179_stop,
        .flags = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1783,7 -1796,7 +1798,7 @@@ static const struct driver_info belkin_
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset  = ax88179_reset,
+       .reset  = ax88179_net_reset,
        .stop   = ax88179_stop,
        .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1796,7 -1809,7 +1811,7 @@@ static const struct driver_info toshiba
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset  = ax88179_reset,
+       .reset  = ax88179_net_reset,
        .stop = ax88179_stop,
        .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1809,7 -1822,7 +1824,7 @@@ static const struct driver_info mct_inf
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset  = ax88179_reset,
+       .reset  = ax88179_net_reset,
        .stop   = ax88179_stop,
        .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1822,7 -1835,7 +1837,7 @@@ static const struct driver_info at_umc2
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset  = ax88179_reset,
+       .reset  = ax88179_net_reset,
        .stop   = ax88179_stop,
        .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1835,7 -1848,7 +1850,7 @@@ static const struct driver_info at_umc2
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset  = ax88179_reset,
+       .reset  = ax88179_net_reset,
        .stop   = ax88179_stop,
        .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
@@@ -1848,7 -1861,7 +1863,7 @@@ static const struct driver_info at_umc2
        .unbind = ax88179_unbind,
        .status = ax88179_status,
        .link_reset = ax88179_link_reset,
-       .reset  = ax88179_reset,
+       .reset  = ax88179_net_reset,
        .stop   = ax88179_stop,
        .flags  = FLAG_ETHER | FLAG_FRAMING_AX,
        .rx_fixup = ax88179_rx_fixup,
index 0aa15cac0308bf96ea9bde5ded3de62db8bdae09,80452bd982531b2c46bbd71abff85eb6a01141ad..779cfdf2e9d652e0d5f82d3c22f641ad3918e3ef
@@@ -85,7 -85,7 +85,7 @@@ enum mlx5_sqp_t 
  };
  
  enum {
 -      MLX5_MAX_PORTS  = 4,
 +      MLX5_MAX_PORTS  = 8,
  };
  
  enum {
@@@ -862,6 -862,7 +862,7 @@@ struct mlx5_cmd_work_ent 
        void                   *context;
        int                     idx;
        struct completion       handling;
+       struct completion       slotted;
        struct completion       done;
        struct mlx5_cmd        *cmd;
        struct work_struct      work;
@@@ -1374,4 -1375,11 +1375,4 @@@ static inline bool mlx5_is_macsec_roce_
  enum {
        MLX5_OCTWORD = 16,
  };
 -
 -struct msi_map mlx5_msix_alloc(struct mlx5_core_dev *dev,
 -                             irqreturn_t (*handler)(int, void *),
 -                             const struct irq_affinity_desc *affdesc,
 -                             const char *name);
 -void mlx5_msix_free(struct mlx5_core_dev *dev, struct msi_map map);
 -
  #endif /* MLX5_DRIVER_H */
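The new "slotted" completion in struct mlx5_cmd_work_ent suggests the command path now signals separately when an entry has been assigned a command slot, in addition to "done". How mlx5 actually drives it is not shown in this diff; as a hedged illustration, the generic completion API such a field would follow looks like this:

#include <linux/completion.h>

struct work_ent {
        struct completion slotted;      /* signalled once a slot is assigned */
        struct completion done;         /* signalled when the command finishes */
};

static void ent_init(struct work_ent *ent)
{
        init_completion(&ent->slotted);
        init_completion(&ent->done);
}

/* producer side: the slot allocator announces the entry holds a slot */
static void ent_got_slot(struct work_ent *ent)
{
        complete(&ent->slotted);
}

/* consumer side: wait for the slot before touching slot-indexed state */
static void ent_wait_for_slot(struct work_ent *ent)
{
        wait_for_completion(&ent->slotted);
}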
diff --combined include/net/ax25.h
index eb9cee8252c855e6b5547020e724b016e4692e67,c2a85fd3f5ea40770c290c62fc8df74365e64cc0..cb622d84cd0cc4570705774036a843c8c075a328
@@@ -139,9 -139,7 +139,9 @@@ enum 
        AX25_VALUES_N2,         /* Default N2 value */
        AX25_VALUES_PACLEN,     /* AX.25 MTU */
        AX25_VALUES_PROTOCOL,   /* Std AX.25, DAMA Slave, DAMA Master */
 +#ifdef CONFIG_AX25_DAMA_SLAVE
        AX25_VALUES_DS_TIMEOUT, /* DAMA Slave timeout */
 +#endif
        AX25_MAX_VALUES         /* THIS MUST REMAIN THE LAST ENTRY OF THIS LIST */
  };
  
@@@ -218,7 -216,7 +218,7 @@@ typedef struct 
  struct ctl_table;
  
  typedef struct ax25_dev {
-       struct ax25_dev         *next;
+       struct list_head        list;
  
        struct net_device       *dev;
        netdevice_tracker       dev_tracker;
@@@ -332,7 -330,6 +332,6 @@@ int ax25_addr_size(const ax25_digi *)
  void ax25_digi_invert(const ax25_digi *, ax25_digi *);
  
  /* ax25_dev.c */
- extern ax25_dev *ax25_dev_list;
  extern spinlock_t ax25_dev_lock;
  
  #if IS_ENABLED(CONFIG_AX25)
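Wrapping AX25_VALUES_DS_TIMEOUT in CONFIG_AX25_DAMA_SLAVE also moves the AX25_MAX_VALUES sentinel, so every values[] array sized by it shrinks by one when the option is off. A tiny standalone sketch of that effect, using a pretend config macro rather than a real Kconfig symbol:

/* Illustrative only: a conditional enum member shifts the MAX sentinel,
 * so arrays sized by it change with the configuration. */
#define MY_CONFIG_DAMA_SLAVE 1          /* pretend Kconfig symbol */

enum {
        VAL_T1,
        VAL_T2,
#if MY_CONFIG_DAMA_SLAVE
        VAL_DS_TIMEOUT,
#endif
        VAL_MAX                         /* must stay the last entry */
};

static int values[VAL_MAX];             /* 3 entries with the option, 2 without */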
diff --combined kernel/bpf/syscall.c
index 13ad74ecf2cd4bcd95cc8f53ff7bbf632e5addc8,cb61d8880dbe0307e298addd47dc481e98daa866..cf6285760aea51d3114e804fe156521caf142582
@@@ -559,7 -559,6 +559,7 @@@ void btf_record_free(struct btf_record 
                case BPF_SPIN_LOCK:
                case BPF_TIMER:
                case BPF_REFCOUNT:
 +              case BPF_WORKQUEUE:
                        /* Nothing to release */
                        break;
                default:
@@@ -609,7 -608,6 +609,7 @@@ struct btf_record *btf_record_dup(cons
                case BPF_SPIN_LOCK:
                case BPF_TIMER:
                case BPF_REFCOUNT:
 +              case BPF_WORKQUEUE:
                        /* Nothing to acquire */
                        break;
                default:
@@@ -661,13 -659,6 +661,13 @@@ void bpf_obj_free_timer(const struct bt
        bpf_timer_cancel_and_free(obj + rec->timer_off);
  }
  
 +void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj)
 +{
 +      if (WARN_ON_ONCE(!btf_record_has_field(rec, BPF_WORKQUEUE)))
 +              return;
 +      bpf_wq_cancel_and_free(obj + rec->wq_off);
 +}
 +
  void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
  {
        const struct btf_field *fields;
                case BPF_TIMER:
                        bpf_timer_cancel_and_free(field_ptr);
                        break;
 +              case BPF_WORKQUEUE:
 +                      bpf_wq_cancel_and_free(field_ptr);
 +                      break;
                case BPF_KPTR_UNREF:
                        WRITE_ONCE(*(u64 *)field_ptr, 0);
                        break;
@@@ -1097,7 -1085,7 +1097,7 @@@ static int map_check_btf(struct bpf_ma
  
        map->record = btf_parse_fields(btf, value_type,
                                       BPF_SPIN_LOCK | BPF_TIMER | BPF_KPTR | BPF_LIST_HEAD |
 -                                     BPF_RB_ROOT | BPF_REFCOUNT,
 +                                     BPF_RB_ROOT | BPF_REFCOUNT | BPF_WORKQUEUE,
                                       map->value_size);
        if (!IS_ERR_OR_NULL(map->record)) {
                int i;
                                }
                                break;
                        case BPF_TIMER:
 +                      case BPF_WORKQUEUE:
                                if (map->map_type != BPF_MAP_TYPE_HASH &&
                                    map->map_type != BPF_MAP_TYPE_LRU_HASH &&
                                    map->map_type != BPF_MAP_TYPE_ARRAY) {
@@@ -3511,12 -3498,17 +3511,12 @@@ out_put_prog
        return err;
  }
  
 -struct bpf_raw_tp_link {
 -      struct bpf_link link;
 -      struct bpf_raw_event_map *btp;
 -};
 -
  static void bpf_raw_tp_link_release(struct bpf_link *link)
  {
        struct bpf_raw_tp_link *raw_tp =
                container_of(link, struct bpf_raw_tp_link, link);
  
 -      bpf_probe_unregister(raw_tp->btp, raw_tp->link.prog);
 +      bpf_probe_unregister(raw_tp->btp, raw_tp);
        bpf_put_raw_tracepoint(raw_tp->btp);
  }
  
@@@ -3816,7 -3808,7 +3816,7 @@@ static int bpf_perf_link_attach(const u
  #endif /* CONFIG_PERF_EVENTS */
  
  static int bpf_raw_tp_link_attach(struct bpf_prog *prog,
 -                                const char __user *user_tp_name)
 +                                const char __user *user_tp_name, u64 cookie)
  {
        struct bpf_link_primer link_primer;
        struct bpf_raw_tp_link *link;
        bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
                      &bpf_raw_tp_link_lops, prog);
        link->btp = btp;
 +      link->cookie = cookie;
  
        err = bpf_link_prime(&link->link, &link_primer);
        if (err) {
                goto out_put_btp;
        }
  
 -      err = bpf_probe_register(link->btp, prog);
 +      err = bpf_probe_register(link->btp, link);
        if (err) {
                bpf_link_cleanup(&link_primer);
                goto out_put_btp;
@@@ -3884,13 -3875,11 +3884,13 @@@ out_put_btp
        return err;
  }
  
 -#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd
 +#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.cookie
  
  static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
  {
        struct bpf_prog *prog;
 +      void __user *tp_name;
 +      __u64 cookie;
        int fd;
  
        if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
        if (IS_ERR(prog))
                return PTR_ERR(prog);
  
 -      fd = bpf_raw_tp_link_attach(prog, u64_to_user_ptr(attr->raw_tracepoint.name));
 +      tp_name = u64_to_user_ptr(attr->raw_tracepoint.name);
 +      cookie = attr->raw_tracepoint.cookie;
 +      fd = bpf_raw_tp_link_attach(prog, tp_name, cookie);
        if (fd < 0)
                bpf_prog_put(prog);
        return fd;
@@@ -3998,6 -3985,11 +3998,11 @@@ static int bpf_prog_attach_check_attach
                         * check permissions at attach time.
                         */
                        return -EPERM;
+               ptype = attach_type_to_prog_type(attach_type);
+               if (prog->type != ptype)
+                       return -EINVAL;
                return prog->enforce_expected_attach_type &&
                        prog->expected_attach_type != attach_type ?
                        -EINVAL : 0;
                if (prog->expected_attach_type == BPF_TRACE_KPROBE_MULTI &&
                    attach_type != BPF_TRACE_KPROBE_MULTI)
                        return -EINVAL;
 +              if (prog->expected_attach_type == BPF_TRACE_KPROBE_SESSION &&
 +                  attach_type != BPF_TRACE_KPROBE_SESSION)
 +                      return -EINVAL;
                if (prog->expected_attach_type == BPF_TRACE_UPROBE_MULTI &&
                    attach_type != BPF_TRACE_UPROBE_MULTI)
                        return -EINVAL;
                if (attach_type != BPF_PERF_EVENT &&
                    attach_type != BPF_TRACE_KPROBE_MULTI &&
 +                  attach_type != BPF_TRACE_KPROBE_SESSION &&
                    attach_type != BPF_TRACE_UPROBE_MULTI)
                        return -EINVAL;
                return 0;
@@@ -5244,7 -5232,7 +5249,7 @@@ static int link_create(union bpf_attr *
                        goto out;
                }
                if (prog->expected_attach_type == BPF_TRACE_RAW_TP)
 -                      ret = bpf_raw_tp_link_attach(prog, NULL);
 +                      ret = bpf_raw_tp_link_attach(prog, NULL, attr->link_create.tracing.cookie);
                else if (prog->expected_attach_type == BPF_TRACE_ITER)
                        ret = bpf_iter_link_attach(attr, uattr, prog);
                else if (prog->expected_attach_type == BPF_LSM_CGROUP)
        case BPF_PROG_TYPE_SK_LOOKUP:
                ret = netns_bpf_link_create(attr, prog);
                break;
 +      case BPF_PROG_TYPE_SK_MSG:
 +      case BPF_PROG_TYPE_SK_SKB:
 +              ret = sock_map_link_create(attr, prog);
 +              break;
  #ifdef CONFIG_NET
        case BPF_PROG_TYPE_XDP:
                ret = bpf_xdp_link_attach(attr, prog);
        case BPF_PROG_TYPE_KPROBE:
                if (attr->link_create.attach_type == BPF_PERF_EVENT)
                        ret = bpf_perf_link_attach(attr, prog);
 -              else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI)
 +              else if (attr->link_create.attach_type == BPF_TRACE_KPROBE_MULTI ||
 +                       attr->link_create.attach_type == BPF_TRACE_KPROBE_SESSION)
                        ret = bpf_kprobe_multi_link_attach(attr, prog);
                else if (attr->link_create.attach_type == BPF_TRACE_UPROBE_MULTI)
                        ret = bpf_uprobe_multi_link_attach(attr, prog);
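With raw_tracepoint.cookie now the last field accepted by BPF_RAW_TRACEPOINT_OPEN, a loader can pass a u64 through to the raw tracepoint link. A hedged userspace sketch follows; it assumes the matching uapi bpf.h update that adds the cookie member, and error handling is trimmed:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int raw_tp_open_with_cookie(int prog_fd, const char *tp_name, __u64 cookie)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.raw_tracepoint.name    = (__u64)(unsigned long)tp_name;
        attr.raw_tracepoint.prog_fd = prog_fd;
        attr.raw_tracepoint.cookie  = cookie;   /* new field in this merge */

        /* returns a link fd on success, negative errno on failure */
        return syscall(__NR_bpf, BPF_RAW_TRACEPOINT_OPEN, &attr, sizeof(attr));
}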
diff --combined net/ax25/ax25_dev.c
index 0bc682ffae9c5c481266b737f0156fd3acd8fac2,c9d55b99a7a570e742b7220b2f0641a723ab9171..742d7c68e7e7e9fb6c1734f75d20402b2e4ad5a9
  #include <net/sock.h>
  #include <linux/uaccess.h>
  #include <linux/fcntl.h>
+ #include <linux/list.h>
  #include <linux/mm.h>
  #include <linux/interrupt.h>
  #include <linux/init.h>
  
- ax25_dev *ax25_dev_list;
+ static LIST_HEAD(ax25_dev_list);
  DEFINE_SPINLOCK(ax25_dev_lock);
  
  ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
        ax25_dev *ax25_dev, *res = NULL;
  
        spin_lock_bh(&ax25_dev_lock);
-       for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
+       list_for_each_entry(ax25_dev, &ax25_dev_list, list)
                if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) {
                        res = ax25_dev;
                        ax25_dev_hold(ax25_dev);
+                       break;
                }
        spin_unlock_bh(&ax25_dev_lock);
  
@@@ -59,7 -61,6 +61,6 @@@ void ax25_dev_device_up(struct net_devi
        }
  
        refcount_set(&ax25_dev->refcount, 1);
-       dev->ax25_ptr     = ax25_dev;
        ax25_dev->dev     = dev;
        netdev_hold(dev, &ax25_dev->dev_tracker, GFP_KERNEL);
        ax25_dev->forward = NULL;
        ax25_dev->values[AX25_VALUES_N2]        = AX25_DEF_N2;
        ax25_dev->values[AX25_VALUES_PACLEN]    = AX25_DEF_PACLEN;
        ax25_dev->values[AX25_VALUES_PROTOCOL]  = AX25_DEF_PROTOCOL;
 +
 +#ifdef CONFIG_AX25_DAMA_SLAVE
        ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT;
 +#endif
  
  #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
        ax25_ds_setup_timer(ax25_dev);
  #endif
  
        spin_lock_bh(&ax25_dev_lock);
-       ax25_dev->next = ax25_dev_list;
-       ax25_dev_list  = ax25_dev;
+       list_add(&ax25_dev->list, &ax25_dev_list);
+       dev->ax25_ptr     = ax25_dev;
        spin_unlock_bh(&ax25_dev_lock);
-       ax25_dev_hold(ax25_dev);
  
        ax25_register_dev_sysctl(ax25_dev);
  }
@@@ -114,32 -111,19 +114,19 @@@ void ax25_dev_device_down(struct net_de
        /*
         *      Remove any packet forwarding that points to this device.
         */
-       for (s = ax25_dev_list; s != NULL; s = s->next)
+       list_for_each_entry(s, &ax25_dev_list, list)
                if (s->forward == dev)
                        s->forward = NULL;
  
-       if ((s = ax25_dev_list) == ax25_dev) {
-               ax25_dev_list = s->next;
-               goto unlock_put;
-       }
-       while (s != NULL && s->next != NULL) {
-               if (s->next == ax25_dev) {
-                       s->next = ax25_dev->next;
-                       goto unlock_put;
+       list_for_each_entry(s, &ax25_dev_list, list) {
+               if (s == ax25_dev) {
+                       list_del(&s->list);
+                       break;
                }
-               s = s->next;
        }
-       spin_unlock_bh(&ax25_dev_lock);
-       dev->ax25_ptr = NULL;
-       ax25_dev_put(ax25_dev);
-       return;
  
- unlock_put:
-       spin_unlock_bh(&ax25_dev_lock);
-       ax25_dev_put(ax25_dev);
        dev->ax25_ptr = NULL;
+       spin_unlock_bh(&ax25_dev_lock);
        netdev_put(dev, &ax25_dev->dev_tracker);
        ax25_dev_put(ax25_dev);
  }
@@@ -203,16 -187,13 +190,13 @@@ struct net_device *ax25_fwd_dev(struct 
   */
  void __exit ax25_dev_free(void)
  {
-       ax25_dev *s, *ax25_dev;
+       ax25_dev *s, *n;
  
        spin_lock_bh(&ax25_dev_lock);
-       ax25_dev = ax25_dev_list;
-       while (ax25_dev != NULL) {
-               s        = ax25_dev;
-               netdev_put(ax25_dev->dev, &ax25_dev->dev_tracker);
-               ax25_dev = ax25_dev->next;
+       list_for_each_entry_safe(s, n, &ax25_dev_list, list) {
+               netdev_put(s->dev, &s->dev_tracker);
+               list_del(&s->list);
                kfree(s);
        }
-       ax25_dev_list = NULL;
        spin_unlock_bh(&ax25_dev_lock);
  }
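The conversion above replaces the hand-rolled singly linked ax25_dev_list with the <linux/list.h> primitives; list_for_each_entry_safe() is what makes deletion during the walk safe, as in ax25_dev_free(). The core pattern, reduced to a standalone sketch with made-up type and lock names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_dev {
        struct list_head list;
        int id;
};

static LIST_HEAD(my_dev_list);
static DEFINE_SPINLOCK(my_dev_lock);

static void my_dev_add(struct my_dev *d)
{
        spin_lock_bh(&my_dev_lock);
        list_add(&d->list, &my_dev_list);
        spin_unlock_bh(&my_dev_lock);
}

static void my_dev_free_all(void)
{
        struct my_dev *d, *tmp;

        spin_lock_bh(&my_dev_lock);
        /* the _safe variant keeps 'tmp' so the walk survives list_del() */
        list_for_each_entry_safe(d, tmp, &my_dev_list, list) {
                list_del(&d->list);
                kfree(d);
        }
        spin_unlock_bh(&my_dev_lock);
}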
diff --combined net/ipv4/devinet.c
index a612c57b61c5f6910e3f09eb0670753deaa53a04,7e45c34c8340a6d2cf96b4485cd4249fd4da7009..96accde527da2484417b14d96ca0222ef5f67c2f
@@@ -224,7 -224,6 +224,7 @@@ static struct in_ifaddr *inet_alloc_ifa
  static void inet_rcu_free_ifa(struct rcu_head *head)
  {
        struct in_ifaddr *ifa = container_of(head, struct in_ifaddr, rcu_head);
 +
        if (ifa->ifa_dev)
                in_dev_put(ifa->ifa_dev);
        kfree(ifa);
  
  static void inet_free_ifa(struct in_ifaddr *ifa)
  {
 -      call_rcu(&ifa->rcu_head, inet_rcu_free_ifa);
 +      /* Our reference to ifa->ifa_dev must be freed ASAP
 +       * to release the reference to the netdev the same way.
 +       * in_dev_put() -> in_dev_finish_destroy() -> netdev_put()
 +       */
 +      call_rcu_hurry(&ifa->rcu_head, inet_rcu_free_ifa);
  }
  
  static void in_dev_free_rcu(struct rcu_head *head)
@@@ -1688,6 -1683,7 +1688,7 @@@ static int inet_fill_ifaddr(struct sk_b
        struct nlmsghdr  *nlh;
        unsigned long tstamp;
        u32 preferred, valid;
+       u32 flags;
  
        nlh = nlmsg_put(skb, args->portid, args->seq, args->event, sizeof(*ifm),
                        args->flags);
        ifm = nlmsg_data(nlh);
        ifm->ifa_family = AF_INET;
        ifm->ifa_prefixlen = ifa->ifa_prefixlen;
-       ifm->ifa_flags = READ_ONCE(ifa->ifa_flags);
+       flags = READ_ONCE(ifa->ifa_flags);
+       /* Warning : ifm->ifa_flags is an __u8, it holds only 8 bits.
+        * The 32bit value is given in IFA_FLAGS attribute.
+        */
+       ifm->ifa_flags = (__u8)flags;
        ifm->ifa_scope = ifa->ifa_scope;
        ifm->ifa_index = ifa->ifa_dev->dev->ifindex;
  
                goto nla_put_failure;
  
        tstamp = READ_ONCE(ifa->ifa_tstamp);
-       if (!(ifm->ifa_flags & IFA_F_PERMANENT)) {
+       if (!(flags & IFA_F_PERMANENT)) {
                preferred = READ_ONCE(ifa->ifa_preferred_lft);
                valid = READ_ONCE(ifa->ifa_valid_lft);
                if (preferred != INFINITY_LIFE_TIME) {
             nla_put_string(skb, IFA_LABEL, ifa->ifa_label)) ||
            (ifa->ifa_proto &&
             nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto)) ||
-           nla_put_u32(skb, IFA_FLAGS, ifm->ifa_flags) ||
+           nla_put_u32(skb, IFA_FLAGS, flags) ||
            (ifa->ifa_rt_priority &&
             nla_put_u32(skb, IFA_RT_PRIORITY, ifa->ifa_rt_priority)) ||
            put_cacheinfo(skb, READ_ONCE(ifa->ifa_cstamp), tstamp,
@@@ -2520,7 -2522,7 +2527,7 @@@ static int ipv4_doint_and_flush(struct 
  
  static struct devinet_sysctl_table {
        struct ctl_table_header *sysctl_header;
 -      struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
 +      struct ctl_table devinet_vars[IPV4_DEVCONF_MAX];
  } devinet_sysctl = {
        .devinet_vars = {
                DEVINET_SYSCTL_COMPLEX_ENTRY(FORWARDING, "forwarding",
@@@ -2583,7 -2585,7 +2590,7 @@@ static int __devinet_sysctl_register(st
        if (!t)
                goto out;
  
 -      for (i = 0; i < ARRAY_SIZE(t->devinet_vars) - 1; i++) {
 +      for (i = 0; i < ARRAY_SIZE(t->devinet_vars); i++) {
                t->devinet_vars[i].data += (char *)p - (char *)&ipv4_devconf;
                t->devinet_vars[i].extra1 = p;
                t->devinet_vars[i].extra2 = net;
@@@ -2657,6 -2659,7 +2664,6 @@@ static struct ctl_table ctl_forward_ent
                .extra1         = &ipv4_devconf,
                .extra2         = &init_net,
        },
 -      { },
  };
  #endif
  
@@@ -2753,7 -2756,7 +2760,7 @@@ err_alloc_all
  static __net_exit void devinet_exit_net(struct net *net)
  {
  #ifdef CONFIG_SYSCTL
 -      struct ctl_table *tbl;
 +      const struct ctl_table *tbl;
  
        tbl = net->ipv4.forw_hdr->ctl_table_arg;
        unregister_net_sysctl_table(net->ipv4.forw_hdr);
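The devinet change stops feeding the IFA_FLAGS attribute from the on-wire __u8 ifm->ifa_flags: any flag above bit 7 would be silently lost in the cast, which is exactly what the added comment warns about. A tiny userspace illustration of the truncation; the flag value used matches my reading of the uapi header and should be treated as an assumption here:

#include <stdint.h>
#include <stdio.h>

#define IFA_F_PERMANENT      0x80U
#define IFA_F_MANAGETEMPADDR 0x100U     /* bit 8: does not fit in a u8 */

int main(void)
{
        uint32_t flags = IFA_F_PERMANENT | IFA_F_MANAGETEMPADDR;
        uint8_t  ifa_flags = (uint8_t)flags;    /* what ifm->ifa_flags can hold */

        printf("full=0x%x truncated=0x%x\n", flags, ifa_flags);
        /* prints full=0x180 truncated=0x80: MANAGETEMPADDR is gone,
         * hence the separate 32-bit IFA_FLAGS netlink attribute. */
        return 0;
}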
diff --combined net/unix/af_unix.c
index dc16515417232787a2ff769ec382307d967e1941,e94839d89b09d8510184edb8c6b55968afa3f35d..fa906ec5e657b6648ec6add6fadf6adc25ed28e6
@@@ -546,7 -546,7 +546,7 @@@ static void unix_write_space(struct soc
                if (skwq_has_sleeper(wq))
                        wake_up_interruptible_sync_poll(&wq->wait,
                                EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND);
 -              sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 +              sk_wake_async_rcu(sk, SOCK_WAKE_SPACE, POLL_OUT);
        }
        rcu_read_unlock();
  }
@@@ -979,11 -979,11 +979,11 @@@ static struct sock *unix_create1(struc
        sk->sk_max_ack_backlog  = net->unx.sysctl_max_dgram_qlen;
        sk->sk_destruct         = unix_sock_destructor;
        u = unix_sk(sk);
 -      u->inflight = 0;
 +      u->listener = NULL;
 +      u->vertex = NULL;
        u->path.dentry = NULL;
        u->path.mnt = NULL;
        spin_lock_init(&u->lock);
 -      INIT_LIST_HEAD(&u->link);
        mutex_init(&u->iolock); /* single task reading lock */
        mutex_init(&u->bindlock); /* single task binding lock */
        init_waitqueue_head(&u->peer_wait);
@@@ -1597,7 -1597,6 +1597,7 @@@ restart
        newsk->sk_type          = sk->sk_type;
        init_peercred(newsk);
        newu = unix_sk(newsk);
 +      newu->listener = other;
        RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
        otheru = unix_sk(other);
  
@@@ -1693,8 -1692,8 +1693,8 @@@ static int unix_accept(struct socket *s
                       bool kern)
  {
        struct sock *sk = sock->sk;
 -      struct sock *tsk;
        struct sk_buff *skb;
 +      struct sock *tsk;
        int err;
  
        err = -EOPNOTSUPP;
  
        /* attach accepted sock to socket */
        unix_state_lock(tsk);
 +      unix_update_edges(unix_sk(tsk));
        newsock->state = SS_CONNECTED;
        unix_sock_inherit_flags(sock, newsock);
        sock_graft(tsk, newsock);
@@@ -1791,29 -1789,81 +1791,29 @@@ static inline bool too_many_unix_fds(st
  
  static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
  {
 -      int i;
 -
        if (too_many_unix_fds(current))
                return -ETOOMANYREFS;
  
 -      /* Need to duplicate file references for the sake of garbage
 -       * collection.  Otherwise a socket in the fps might become a
 -       * candidate for GC while the skb is not yet queued.
 -       */
 -      UNIXCB(skb).fp = scm_fp_dup(scm->fp);
 -      if (!UNIXCB(skb).fp)
 -              return -ENOMEM;
 +      UNIXCB(skb).fp = scm->fp;
 +      scm->fp = NULL;
  
 -      for (i = scm->fp->count - 1; i >= 0; i--)
 -              unix_inflight(scm->fp->user, scm->fp->fp[i]);
 +      if (unix_prepare_fpl(UNIXCB(skb).fp))
 +              return -ENOMEM;
  
        return 0;
  }
  
  static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
  {
 -      int i;
 -
        scm->fp = UNIXCB(skb).fp;
        UNIXCB(skb).fp = NULL;
  
 -      for (i = scm->fp->count - 1; i >= 0; i--)
 -              unix_notinflight(scm->fp->user, scm->fp->fp[i]);
 +      unix_destroy_fpl(scm->fp);
  }
  
  static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
  {
        scm->fp = scm_fp_dup(UNIXCB(skb).fp);
 -
 -      /*
 -       * Garbage collection of unix sockets starts by selecting a set of
 -       * candidate sockets which have reference only from being in flight
 -       * (total_refs == inflight_refs).  This condition is checked once during
 -       * the candidate collection phase, and candidates are marked as such, so
 -       * that non-candidates can later be ignored.  While inflight_refs is
 -       * protected by unix_gc_lock, total_refs (file count) is not, hence this
 -       * is an instantaneous decision.
 -       *
 -       * Once a candidate, however, the socket must not be reinstalled into a
 -       * file descriptor while the garbage collection is in progress.
 -       *
 -       * If the above conditions are met, then the directed graph of
 -       * candidates (*) does not change while unix_gc_lock is held.
 -       *
 -       * Any operations that changes the file count through file descriptors
 -       * (dup, close, sendmsg) does not change the graph since candidates are
 -       * not installed in fds.
 -       *
 -       * Dequeing a candidate via recvmsg would install it into an fd, but
 -       * that takes unix_gc_lock to decrement the inflight count, so it's
 -       * serialized with garbage collection.
 -       *
 -       * MSG_PEEK is special in that it does not change the inflight count,
 -       * yet does install the socket into an fd.  The following lock/unlock
 -       * pair is to ensure serialization with garbage collection.  It must be
 -       * done between incrementing the file count and installing the file into
 -       * an fd.
 -       *
 -       * If garbage collection starts after the barrier provided by the
 -       * lock/unlock, then it will see the elevated refcount and not mark this
 -       * as a candidate.  If a garbage collection is already in progress
 -       * before the file count was incremented, then the lock/unlock pair will
 -       * ensure that garbage collection is finished before progressing to
 -       * installing the fd.
 -       *
 -       * (*) A -> B where B is on the queue of A or B is on the queue of C
 -       * which is on the queue of listening socket A.
 -       */
 -      spin_lock(&unix_gc_lock);
 -      spin_unlock(&unix_gc_lock);
  }
  
  static void unix_destruct_scm(struct sk_buff *skb)
@@@ -1887,10 -1937,8 +1887,10 @@@ static void scm_stat_add(struct sock *s
        struct scm_fp_list *fp = UNIXCB(skb).fp;
        struct unix_sock *u = unix_sk(sk);
  
 -      if (unlikely(fp && fp->count))
 +      if (unlikely(fp && fp->count)) {
                atomic_add(fp->count, &u->scm_stat.nr_fds);
 +              unix_add_edges(fp, u);
 +      }
  }
  
  static void scm_stat_del(struct sock *sk, struct sk_buff *skb)
        struct scm_fp_list *fp = UNIXCB(skb).fp;
        struct unix_sock *u = unix_sk(sk);
  
 -      if (unlikely(fp && fp->count))
 +      if (unlikely(fp && fp->count)) {
                atomic_sub(fp->count, &u->scm_stat.nr_fds);
 +              unix_del_edges(fp);
 +      }
  }
  
  /*
@@@ -2224,7 -2270,7 +2224,7 @@@ static int unix_stream_sendmsg(struct s
                        goto out_err;
        }
  
-       if (sk->sk_shutdown & SEND_SHUTDOWN)
+       if (READ_ONCE(sk->sk_shutdown) & SEND_SHUTDOWN)
                goto pipe_err;
  
        while (sent < len) {
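The last hunk reads sk->sk_shutdown with READ_ONCE(), presumably because unix_stream_sendmsg() can observe the field without the socket lock while another task updates it; the annotation pairs with WRITE_ONCE() on the writer side. A generic sketch of that pairing, assuming nothing about the rest of the socket code and using a placeholder bit value:

#include <linux/compiler.h>
#include <linux/types.h>

struct conn {
        int shutdown;   /* written and read by different tasks */
};

/* writer: runs under the connection's own lock */
static void conn_shutdown(struct conn *c, int mode)
{
        WRITE_ONCE(c->shutdown, c->shutdown | mode);
}

/* lockless reader: READ_ONCE() prevents torn or refetched loads */
static bool conn_send_allowed(const struct conn *c)
{
        return !(READ_ONCE(c->shutdown) & 0x2 /* SEND_SHUTDOWN-like bit */);
}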