Git Repo - J-linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author Paolo Abeni <[email protected]>
Thu, 21 Dec 2023 21:17:23 +0000 (22:17 +0100)
committer Paolo Abeni <[email protected]>
Thu, 21 Dec 2023 21:17:23 +0000 (22:17 +0100)
Cross-merge networking fixes after downstream PR.

Adjacent changes:

drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
  23c93c3b6275 ("bnxt_en: do not map packet buffers twice")
  6d1add95536b ("bnxt_en: Modify TX ring indexing logic.")

tools/testing/selftests/net/Makefile
  2258b666482d ("selftests: add vlan hw filter tests")
  a0bc96c0cd6e ("selftests: net: verify fq per-band packet limit")

Signed-off-by: Paolo Abeni <[email protected]>
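
[Editor's note, not part of the commit message: the two bnxt_en commits listed under "Adjacent changes" touch the same frag-fill path in bnxt_xdp.c. One reuses the DMA address the page pool already holds for each fragment instead of remapping it, the other funnels every software-ring access through a masked producer/consumer index, visible as the RING_TX(bp, ...) accessors in the bnxt_xdp.c hunks below. The following is a minimal, standalone C sketch of that masking pattern only; RING_SLOT, ring_mask and the ring size are hypothetical names used to model the idea, not the driver's actual definitions.]

    #include <stdio.h>
    #include <stdint.h>

    /* Hypothetical model of masked ring indexing: the 16-bit producer
     * counter wraps freely, and every access to the software ring masks
     * it down to a valid slot (ring size is a power of two).
     */
    #define RING_SLOT(idx, mask) ((idx) & (mask))

    int main(void)
    {
            const uint16_t ring_size = 256;          /* power of two */
            const uint16_t ring_mask = ring_size - 1;
            uint16_t prod = 65530;                   /* near 16-bit wrap */
            int i;

            for (i = 0; i < 8; i++) {
                    printf("prod=%5u -> slot=%3u\n",
                           (unsigned int)prod,
                           (unsigned int)RING_SLOT(prod, ring_mask));
                    prod++;                          /* wraps at 65536 */
            }
            return 0;
    }

[Because the ring size divides 65536, the masked slot sequence stays contiguous across the counter wrap (…254, 255, 0, 1…), which is why the counter itself never needs to be bounded.]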
26 files changed:
MAINTAINERS
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/intel/i40e/i40e_register.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/ice/ice_ethtool.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/mediatek/mtk_wed_wo.c
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/phy/phy_device.c
drivers/net/wireless/intel/iwlwifi/pcie/rx.c
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
drivers/net/wireless/mediatek/mt76/dma.c
include/linux/ieee80211.h
include/net/sock.h
net/core/dev.c
net/core/skbuff.c
net/mac80211/cfg.c
net/mac80211/mlme.c
net/mptcp/protocol.h
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/config.x86_64
tools/testing/selftests/net/Makefile
tools/testing/selftests/net/mptcp/mptcp_join.sh

diff --combined MAINTAINERS
index dda78b4ce70767e906937975f1d2874f3fbedd0f,7cef2d2ef8d708bd56113360e5696e3c17535302..fc044884c472e273bb3af6794d223ea5c1cbf09c
@@@ -2394,6 -2394,7 +2394,6 @@@ F:      drivers/memory/atmel
  F:    drivers/watchdog/sama5d4_wdt.c
  F:    include/soc/at91/
  X:    drivers/input/touchscreen/atmel_mxt_ts.c
 -X:    drivers/net/wireless/atmel/
  N:    at91
  N:    atmel
  
@@@ -3072,14 -3073,6 +3072,14 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/net/asix,ax88796c.yaml
  F:    drivers/net/ethernet/asix/ax88796c_*
  
 +ASIX PHY DRIVER [RUST]
 +M:    FUJITA Tomonori <[email protected]>
 +R:    Trevor Gross <[email protected]>
 +L:    [email protected]
 +L:    [email protected]
 +S:    Maintained
 +F:    drivers/net/phy/ax88796b_rust.rs
 +
  ASPEED CRYPTO DRIVER
  M:    Neal Liu <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
@@@ -3305,6 -3298,13 +3305,6 @@@ T:     git git://github.com/ndyer/linux.gi
  F:    Documentation/devicetree/bindings/input/atmel,maxtouch.yaml
  F:    drivers/input/touchscreen/atmel_mxt_ts.c
  
 -ATMEL WIRELESS DRIVER
 -L:    [email protected]
 -S:    Orphan
 -W:    http://www.thekelleys.org.uk/atmel
 -W:    http://atmelwlandriver.sourceforge.net/
 -F:    drivers/net/wireless/atmel/atmel*
 -
  ATOMIC INFRASTRUCTURE
  M:    Will Deacon <[email protected]>
  M:    Peter Zijlstra <[email protected]>
@@@ -6050,10 -6050,8 +6050,8 @@@ M:     Mikulas Patocka <[email protected]
  M:    [email protected]
  L:    [email protected]
  S:    Maintained
- W:    http://sources.redhat.com/dm
  Q:    http://patchwork.kernel.org/project/dm-devel/list/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git
- T:    quilt http://people.redhat.com/agk/patches/linux/editing/
  F:    Documentation/admin-guide/device-mapper/
  F:    drivers/md/Kconfig
  F:    drivers/md/Makefile
@@@ -7881,14 -7879,6 +7879,14 @@@ F:    include/uapi/linux/mdio.
  F:    include/uapi/linux/mii.h
  F:    net/core/of_net.c
  
 +ETHERNET PHY LIBRARY [RUST]
 +M:    FUJITA Tomonori <[email protected]>
 +R:    Trevor Gross <[email protected]>
 +L:    [email protected]
 +L:    [email protected]
 +S:    Maintained
 +F:    rust/kernel/net/phy.rs
 +
  EXEC & BINFMT API
  R:    Eric Biederman <[email protected]>
  R:    Kees Cook <[email protected]>
@@@ -9534,6 -9524,7 +9532,7 @@@ F:      drivers/bus/hisi_lpc.
  HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
  M:    Yisen Zhuang <[email protected]>
  M:    Salil Mehta <[email protected]>
+ M:    Jijie Shao <[email protected]>
  L:    [email protected]
  S:    Maintained
  W:    http://www.hisilicon.com
@@@ -9663,6 -9654,11 +9662,6 @@@ S:     Maintaine
  F:    Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml
  F:    drivers/iio/pressure/mprls0025pa.c
  
 -HOST AP DRIVER
 -L:    [email protected]
 -S:    Obsolete
 -F:    drivers/net/wireless/intersil/hostap/
 -
  HP BIOSCFG DRIVER
  M:    Jorge Lopez <[email protected]>
  L:    [email protected]
@@@ -10966,7 -10962,6 +10965,7 @@@ F:   drivers/net/wireless/intel/iwlegacy
  
  INTEL WIRELESS WIFI LINK (iwlwifi)
  M:    Gregory Greenman <[email protected]>
 +M:    Miri Korenblit <[email protected]>
  L:    [email protected]
  S:    Supported
  W:    https://wireless.wiki.kernel.org/en/users/drivers/iwlwifi
@@@ -11973,14 -11968,6 +11972,14 @@@ S: Maintaine
  F:    arch/mips/lantiq
  F:    drivers/soc/lantiq
  
 +LANTIQ PEF2256 DRIVER
 +M:    Herve Codina <[email protected]>
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/net/lantiq,pef2256.yaml
 +F:    drivers/net/wan/framer/pef2256/
 +F:    drivers/pinctrl/pinctrl-pef2256.c
 +F:    include/linux/framer/pef2256.h
 +
  LASI 53c700 driver for PARISC
  M:    "James E.J. Bottomley" <[email protected]>
  L:    [email protected]
@@@ -12201,6 -12188,8 +12200,8 @@@ LINUX FOR POWERPC (32-BIT AND 64-BIT
  M:    Michael Ellerman <[email protected]>
  R:    Nicholas Piggin <[email protected]>
  R:    Christophe Leroy <[email protected]>
+ R:    Aneesh Kumar K.V <[email protected]>
+ R:    Naveen N. Rao <[email protected]>
  L:    [email protected]
  S:    Supported
  W:    https://github.com/linuxppc/wiki/wiki
@@@ -12743,8 -12732,7 +12744,8 @@@ MARVELL 88E6XXX ETHERNET SWITCH FABRIC 
  M:    Andrew Lunn <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/devicetree/bindings/net/dsa/marvell.txt
 +F:    Documentation/devicetree/bindings/net/dsa/marvell,mv88e6060.yaml
 +F:    Documentation/devicetree/bindings/net/dsa/marvell,mv88e6xxx.yaml
  F:    Documentation/networking/devlink/mv88e6xxx.rst
  F:    drivers/net/dsa/mv88e6xxx/
  F:    include/linux/dsa/mv88e6xxx.h
@@@ -14946,7 -14934,6 +14947,7 @@@ Q:   https://patchwork.kernel.org/project
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
  F:    Documentation/devicetree/bindings/net/
 +F:    Documentation/networking/net_cachelines/net_device.rst
  F:    drivers/connector/
  F:    drivers/net/
  F:    include/dt-bindings/net/
@@@ -15002,7 -14989,6 +15003,7 @@@ T:   git git://git.kernel.org/pub/scm/lin
  F:    Documentation/core-api/netlink.rst
  F:    Documentation/netlink/
  F:    Documentation/networking/
 +F:    Documentation/networking/net_cachelines/
  F:    Documentation/process/maintainer-netdev.rst
  F:    Documentation/userspace-api/netlink/
  F:    include/linux/in.h
@@@ -15099,7 -15085,7 +15100,7 @@@ W:   https://github.com/multipath-tcp/mpt
  B:    https://github.com/multipath-tcp/mptcp_net-next/issues
  T:    git https://github.com/multipath-tcp/mptcp_net-next.git export-net
  T:    git https://github.com/multipath-tcp/mptcp_net-next.git export
 -F:    Documentation/netlink/specs/mptcp.yaml
 +F:    Documentation/netlink/specs/mptcp_pm.yaml
  F:    Documentation/networking/mptcp-sysctl.rst
  F:    include/net/mptcp.h
  F:    include/trace/events/mptcp.h
@@@ -15112,7 -15098,6 +15113,7 @@@ NETWORKING [TCP
  M:    Eric Dumazet <[email protected]>
  L:    [email protected]
  S:    Maintained
 +F:    Documentation/networking/net_cachelines/tcp_sock.rst
  F:    include/linux/tcp.h
  F:    include/net/tcp.h
  F:    include/trace/events/tcp.h
@@@ -16222,6 -16207,13 +16223,6 @@@ T:  git git://git.kernel.org/pub/scm/lin
  F:    Documentation/filesystems/orangefs.rst
  F:    fs/orangefs/
  
 -ORINOCO DRIVER
 -L:    [email protected]
 -S:    Orphan
 -W:    https://wireless.wiki.kernel.org/en/users/Drivers/orinoco
 -W:    http://www.nongnu.org/orinoco/
 -F:    drivers/net/wireless/intersil/orinoco/
 -
  OV2659 OMNIVISION SENSOR DRIVER
  M:    "Lad, Prabhakar" <[email protected]>
  L:    [email protected]
@@@ -18114,6 -18106,11 +18115,6 @@@ F:  drivers/ras
  F:    include/linux/ras.h
  F:    include/ras/ras_event.h
  
 -RAYLINK/WEBGEAR 802.11 WIRELESS LAN DRIVER
 -L:    [email protected]
 -S:    Orphan
 -F:    drivers/net/wireless/legacy/ray*
 -
  RC-CORE / LIRC FRAMEWORK
  M:    Sean Young <[email protected]>
  L:    [email protected]
@@@ -22593,6 -22590,11 +22594,6 @@@ F:  drivers/usb/gadget/function/*uvc
  F:    drivers/usb/gadget/legacy/webcam.c
  F:    include/uapi/linux/usb/g_uvc.h
  
 -USB WIRELESS RNDIS DRIVER (rndis_wlan)
 -L:    [email protected]
 -S:    Orphan
 -F:    drivers/net/wireless/legacy/rndis_wlan.c
 -
  USB XHCI DRIVER
  M:    Mathias Nyman <[email protected]>
  L:    [email protected]
@@@ -22600,6 -22602,12 +22601,6 @@@ S:  Supporte
  F:    drivers/usb/host/pci-quirks*
  F:    drivers/usb/host/xhci*
  
 -USB ZD1201 DRIVER
 -L:    [email protected]
 -S:    Orphan
 -W:    http://linux-lc100020.sourceforge.net
 -F:    drivers/net/wireless/zydas/zd1201.*
 -
  USER DATAGRAM PROTOCOL (UDP)
  M:    Willem de Bruijn <[email protected]>
  S:    Maintained
@@@ -23401,6 -23409,11 +23402,6 @@@ M:  Miloslav Trmac <[email protected]
  S:    Maintained
  F:    drivers/input/misc/wistron_btns.c
  
 -WL3501 WIRELESS PCMCIA CARD DRIVER
 -L:    [email protected]
 -S:    Orphan
 -F:    drivers/net/wireless/legacy/wl3501*
 -
  WMI BINARY MOF DRIVER
  M:    Armin Wolf <[email protected]>
  R:    Thomas Weißschuh <[email protected]>
index 037624f17aea26187ada7eb209a470da05bc50fe,8cb9a99154aad9e52119aa88157e78475c37bb91..c2b25fc623ecc08410e8fc45cb391d5a555cc1d9
@@@ -42,24 -42,23 +42,23 @@@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(stru
  
        /* fill up the first buffer */
        prod = txr->tx_prod;
 -      tx_buf = &txr->tx_buf_ring[prod];
 +      tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
        tx_buf->nr_frags = num_frags;
        if (xdp)
                tx_buf->page = virt_to_head_page(xdp->data);
  
 -      txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
 +      txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
        flags = (len << TX_BD_LEN_SHIFT) |
                ((num_frags + 1) << TX_BD_FLAGS_BD_CNT_SHIFT) |
                bnxt_lhint_arr[len >> 9];
        txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
 -      txbd->tx_bd_opaque = prod;
 +      txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags);
        txbd->tx_bd_haddr = cpu_to_le64(mapping);
  
        /* now let us fill up the frags into the next buffers */
        for (i = 0; i < num_frags ; i++) {
                skb_frag_t *frag = &sinfo->frags[i];
                struct bnxt_sw_tx_bd *frag_tx_buf;
-               struct pci_dev *pdev = bp->pdev;
                dma_addr_t frag_mapping;
                int frag_len;
  
                WRITE_ONCE(txr->tx_prod, prod);
  
                /* first fill up the first buffer */
 -              frag_tx_buf = &txr->tx_buf_ring[prod];
 +              frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)];
                frag_tx_buf->page = skb_frag_page(frag);
  
 -              txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
 +              txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)];
  
                frag_len = skb_frag_size(frag);
-               frag_mapping = skb_frag_dma_map(&pdev->dev, frag, 0,
-                                               frag_len, DMA_TO_DEVICE);
-               if (unlikely(dma_mapping_error(&pdev->dev, frag_mapping)))
-                       return NULL;
-               dma_unmap_addr_set(frag_tx_buf, mapping, frag_mapping);
                flags = frag_len << TX_BD_LEN_SHIFT;
                txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
+               frag_mapping = page_pool_get_dma_addr(skb_frag_page(frag)) +
+                              skb_frag_off(frag);
                txbd->tx_bd_haddr = cpu_to_le64(frag_mapping);
  
                len = frag_len;
@@@ -127,20 -120,20 +120,20 @@@ static void __bnxt_xmit_xdp_redirect(st
  
  void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
  {
 -      struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
 +      struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0];
        struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
 +      u16 tx_hw_cons = txr->tx_hw_cons;
        bool rx_doorbell_needed = false;
 -      int nr_pkts = bnapi->tx_pkts;
        struct bnxt_sw_tx_bd *tx_buf;
        u16 tx_cons = txr->tx_cons;
        u16 last_tx_cons = tx_cons;
 -      int i, j, frags;
 +      int j, frags;
  
        if (!budget)
                return;
  
 -      for (i = 0; i < nr_pkts; i++) {
 -              tx_buf = &txr->tx_buf_ring[tx_cons];
 +      while (RING_TX(bp, tx_cons) != tx_hw_cons) {
 +              tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
  
                if (tx_buf->action == XDP_REDIRECT) {
                        struct pci_dev *pdev = bp->pdev;
                        frags = tx_buf->nr_frags;
                        for (j = 0; j < frags; j++) {
                                tx_cons = NEXT_TX(tx_cons);
 -                              tx_buf = &txr->tx_buf_ring[tx_cons];
 +                              tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)];
                                page_pool_recycle_direct(rxr->page_pool, tx_buf->page);
                        }
                } else {
 -                      bnxt_sched_reset_txr(bp, txr, i);
 +                      bnxt_sched_reset_txr(bp, txr, tx_cons);
                        return;
                }
                tx_cons = NEXT_TX(tx_cons);
        }
  
 -      bnapi->tx_pkts = 0;
 +      bnapi->events &= ~BNXT_TX_CMP_EVENT;
        WRITE_ONCE(txr->tx_cons, tx_cons);
        if (rx_doorbell_needed) {
 -              tx_buf = &txr->tx_buf_ring[last_tx_cons];
 +              tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)];
                bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
  
        }
@@@ -249,7 -242,7 +242,7 @@@ bool bnxt_rx_xdp(struct bnxt *bp, struc
        pdev = bp->pdev;
        offset = bp->rx_offset;
  
 -      txr = rxr->bnapi->tx_ring;
 +      txr = rxr->bnapi->tx_ring[0];
        /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
        orig_data = xdp.data;
  
        case XDP_TX:
                rx_buf = &rxr->rx_buf_ring[cons];
                mapping = rx_buf->mapping - bp->rx_dma_offset;
 -              *event = 0;
 +              *event &= BNXT_TX_CMP_EVENT;
  
                if (unlikely(xdp_buff_has_frags(&xdp))) {
                        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
@@@ -398,7 -391,7 +391,7 @@@ int bnxt_xdp_xmit(struct net_device *de
  static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
  {
        struct net_device *dev = bp->dev;
 -      int tx_xdp = 0, rc, tc;
 +      int tx_xdp = 0, tx_cp, rc, tc;
        struct bpf_prog *old;
  
        if (prog && !prog->aux->xdp_has_frags &&
        }
        bp->tx_nr_rings_xdp = tx_xdp;
        bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
 -      bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
 +      tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
 +      bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings);
        bnxt_set_tpa_flags(bp);
        bnxt_set_ring_params(bp);
  
index 2e1eaca44343889cff88706e3e8ee16feda6166e,f6671ac797353a10c13f4bd0c8a935fc5c3d1948..14ab642cafdb26f77e98c4c5e7962088103e2cad
  #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
  #define I40E_GLGEN_MSCA_OPCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_OPCODE_SHIFT)
  #define I40E_GLGEN_MSCA_STCODE_SHIFT 28
- #define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_STCODE_SHIFT)
+ #define I40E_GLGEN_MSCA_STCODE_MASK(_i) I40E_MASK(_i, I40E_GLGEN_MSCA_STCODE_SHIFT)
  #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
  #define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
  #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
  #define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
  #define I40E_PFPM_WUFC_MAG_SHIFT 1
  #define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
 -#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
 -#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
 -#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
 -#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
 -#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
 -#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
 -#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
 -#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
 -#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
 -#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
  #define I40E_VFQF_HLUT_MAX_INDEX 15
  
  
  #define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
  #define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
  #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
 +#define I40E_FDEVICT_PCTYPE_DEFAULT 0xc03
  /* Redefined for X722 family */
  #define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
  #endif /* _I40E_REGISTER_H_ */
index de69c2e22448ccc55b3f89f11fb15b2ad02db9dd,f95bc2a4a83883973151ab51927fd25b1e7bfbfd..d9031499697e6aa8b27356654ce89ffb012d27a5
@@@ -37,11 -37,11 +37,11 @@@ typedef void (*I40E_ADMINQ_CALLBACK)(st
  #define I40E_QTX_CTL_VM_QUEUE 0x1
  #define I40E_QTX_CTL_PF_QUEUE 0x2
  
- #define I40E_MDIO_CLAUSE22_STCODE_MASK                I40E_GLGEN_MSCA_STCODE_MASK
+ #define I40E_MDIO_CLAUSE22_STCODE_MASK                I40E_GLGEN_MSCA_STCODE_MASK(1)
  #define I40E_MDIO_CLAUSE22_OPCODE_WRITE_MASK  I40E_GLGEN_MSCA_OPCODE_MASK(1)
  #define I40E_MDIO_CLAUSE22_OPCODE_READ_MASK   I40E_GLGEN_MSCA_OPCODE_MASK(2)
  
- #define I40E_MDIO_CLAUSE45_STCODE_MASK                I40E_GLGEN_MSCA_STCODE_MASK
+ #define I40E_MDIO_CLAUSE45_STCODE_MASK                I40E_GLGEN_MSCA_STCODE_MASK(0)
  #define I40E_MDIO_CLAUSE45_OPCODE_ADDRESS_MASK        I40E_GLGEN_MSCA_OPCODE_MASK(0)
  #define I40E_MDIO_CLAUSE45_OPCODE_WRITE_MASK  I40E_GLGEN_MSCA_OPCODE_MASK(1)
  #define I40E_MDIO_CLAUSE45_OPCODE_READ_MASK   I40E_GLGEN_MSCA_OPCODE_MASK(3)
@@@ -64,7 -64,9 +64,7 @@@
  enum i40e_mac_type {
        I40E_MAC_UNKNOWN = 0,
        I40E_MAC_XL710,
 -      I40E_MAC_VF,
        I40E_MAC_X722,
 -      I40E_MAC_X722_VF,
        I40E_MAC_GENERIC,
  };
  
@@@ -270,7 -272,9 +270,7 @@@ struct i40e_mac_info 
        enum i40e_mac_type type;
        u8 addr[ETH_ALEN];
        u8 perm_addr[ETH_ALEN];
 -      u8 san_addr[ETH_ALEN];
        u8 port_addr[ETH_ALEN];
 -      u16 max_fcoeq;
  };
  
  enum i40e_aq_resources_ids {
@@@ -478,36 -482,6 +478,36 @@@ struct i40e_dcbx_config 
        struct i40e_dcb_app_priority_table app[I40E_DCBX_MAX_APPS];
  };
  
 +enum i40e_hw_flags {
 +      I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE,
 +      I40E_HW_CAP_802_1AD,
 +      I40E_HW_CAP_AQ_PHY_ACCESS,
 +      I40E_HW_CAP_NVM_READ_REQUIRES_LOCK,
 +      I40E_HW_CAP_FW_LLDP_STOPPABLE,
 +      I40E_HW_CAP_FW_LLDP_PERSISTENT,
 +      I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED,
 +      I40E_HW_CAP_X722_FEC_REQUEST,
 +      I40E_HW_CAP_RSS_AQ,
 +      I40E_HW_CAP_128_QP_RSS,
 +      I40E_HW_CAP_ATR_EVICT,
 +      I40E_HW_CAP_WB_ON_ITR,
 +      I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE,
 +      I40E_HW_CAP_NO_PCI_LINK_CHECK,
 +      I40E_HW_CAP_100M_SGMII,
 +      I40E_HW_CAP_NO_DCB_SUPPORT,
 +      I40E_HW_CAP_USE_SET_LLDP_MIB,
 +      I40E_HW_CAP_GENEVE_OFFLOAD,
 +      I40E_HW_CAP_PTP_L4,
 +      I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE,
 +      I40E_HW_CAP_CRT_RETIMER,
 +      I40E_HW_CAP_OUTER_UDP_CSUM,
 +      I40E_HW_CAP_PHY_CONTROLS_LEDS,
 +      I40E_HW_CAP_STOP_FW_LLDP,
 +      I40E_HW_CAP_PORT_ID_VALID,
 +      I40E_HW_CAP_RESTART_AUTONEG,
 +      I40E_HW_CAPS_NBITS,
 +};
 +
  /* Port hardware description */
  struct i40e_hw {
        u8 __iomem *hw_addr;
        struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
        struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
  
 -#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
 -#define I40E_HW_FLAG_802_1AD_CAPABLE        BIT_ULL(1)
 -#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE  BIT_ULL(2)
 -#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
 -#define I40E_HW_FLAG_FW_LLDP_STOPPABLE      BIT_ULL(4)
 -#define I40E_HW_FLAG_FW_LLDP_PERSISTENT     BIT_ULL(5)
 -#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
 -#define I40E_HW_FLAG_DROP_MODE              BIT_ULL(7)
 -#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
 -      u64 flags;
 +      DECLARE_BITMAP(caps, I40E_HW_CAPS_NBITS);
  
        /* Used in set switch config AQ command */
        u16 switch_tag;
        char err_str[16];
  };
  
 -static inline bool i40e_is_vf(struct i40e_hw *hw)
 -{
 -      return (hw->mac.type == I40E_MAC_VF ||
 -              hw->mac.type == I40E_MAC_X722_VF);
 -}
 -
  struct i40e_driver_version {
        u8 major_version;
        u8 minor_version;
index 47ab37ba62d299fface7ba4dc6f3920399965910,bde9bc74f928786d602bc7278dbab96c972f4472..2244d41fd93379be1a921b6955d2ad5b83e241ff
@@@ -1142,7 -1142,8 +1142,7 @@@ __ice_get_strings(struct net_device *ne
        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ICE_VSI_STATS_LEN; i++)
 -                      ethtool_sprintf(&p, "%s",
 -                                      ice_gstrings_vsi_stats[i].stat_string);
 +                      ethtool_puts(&p, ice_gstrings_vsi_stats[i].stat_string);
  
                if (ice_is_port_repr_netdev(netdev))
                        return;
                        return;
  
                for (i = 0; i < ICE_PF_STATS_LEN; i++)
 -                      ethtool_sprintf(&p, "%s",
 -                                      ice_gstrings_pf_stats[i].stat_string);
 +                      ethtool_puts(&p, ice_gstrings_pf_stats[i].stat_string);
  
                for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
                        ethtool_sprintf(&p, "tx_priority_%u_xon.nic", i);
                break;
        case ETH_SS_PRIV_FLAGS:
                for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
 -                      ethtool_sprintf(&p, "%s",
 -                                      ice_gstrings_priv_flags[i].name);
 +                      ethtool_puts(&p, ice_gstrings_priv_flags[i].name);
                break;
        default:
                break;
@@@ -1847,14 -1850,14 +1847,14 @@@ ice_phy_type_to_ethtool(struct net_devi
        linkmode_zero(ks->link_modes.supported);
        linkmode_zero(ks->link_modes.advertising);
  
-       for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+       for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
                if (phy_types_low & BIT_ULL(i))
                        ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
                                             req_speeds, advert_phy_type_lo,
                                             i);
        }
  
-       for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+       for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
                if (phy_types_high & BIT_ULL(i))
                        ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
                                             req_speeds, advert_phy_type_hi,
@@@ -2502,15 -2505,27 +2502,15 @@@ static u32 ice_parse_hdrs(struct ethtoo
        return hdrs;
  }
  
 -#define ICE_FLOW_HASH_FLD_IPV4_SA     BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA)
 -#define ICE_FLOW_HASH_FLD_IPV6_SA     BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA)
 -#define ICE_FLOW_HASH_FLD_IPV4_DA     BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)
 -#define ICE_FLOW_HASH_FLD_IPV6_DA     BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)
 -#define ICE_FLOW_HASH_FLD_TCP_SRC_PORT        BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT)
 -#define ICE_FLOW_HASH_FLD_TCP_DST_PORT        BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)
 -#define ICE_FLOW_HASH_FLD_UDP_SRC_PORT        BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT)
 -#define ICE_FLOW_HASH_FLD_UDP_DST_PORT        BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)
 -#define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT       \
 -      BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT)
 -#define ICE_FLOW_HASH_FLD_SCTP_DST_PORT       \
 -      BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)
 -
  /**
   * ice_parse_hash_flds - parses hash fields from RSS hash input
   * @nfc: ethtool rxnfc command
 + * @symm: true if Symmetric Topelitz is set
   *
   * This function parses the rxnfc command and returns intended
   * hash fields for RSS configuration
   */
 -static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc)
 +static u64 ice_parse_hash_flds(struct ethtool_rxnfc *nfc, bool symm)
  {
        u64 hfld = ICE_HASH_INVALID;
  
@@@ -2579,11 -2594,9 +2579,11 @@@ static in
  ice_set_rss_hash_opt(struct ice_vsi *vsi, struct ethtool_rxnfc *nfc)
  {
        struct ice_pf *pf = vsi->back;
 +      struct ice_rss_hash_cfg cfg;
        struct device *dev;
        u64 hashed_flds;
        int status;
 +      bool symm;
        u32 hdrs;
  
        dev = ice_pf_to_dev(pf);
                return -EINVAL;
        }
  
 -      hashed_flds = ice_parse_hash_flds(nfc);
 +      symm = !!(vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ);
 +      hashed_flds = ice_parse_hash_flds(nfc, symm);
        if (hashed_flds == ICE_HASH_INVALID) {
                dev_dbg(dev, "Invalid hash fields, vsi num = %d\n",
                        vsi->vsi_num);
                return -EINVAL;
        }
  
 -      status = ice_add_rss_cfg(&pf->hw, vsi->idx, hashed_flds, hdrs);
 +      cfg.hash_flds = hashed_flds;
 +      cfg.addl_hdrs = hdrs;
 +      cfg.hdr_type = ICE_RSS_ANY_HEADERS;
 +      cfg.symm = symm;
 +
 +      status = ice_add_rss_cfg(&pf->hw, vsi, &cfg);
        if (status) {
                dev_dbg(dev, "ice_add_rss_cfg failed, vsi num = %d, error = %d\n",
                        vsi->vsi_num, status);
@@@ -2634,7 -2641,6 +2634,7 @@@ ice_get_rss_hash_opt(struct ice_vsi *vs
        struct ice_pf *pf = vsi->back;
        struct device *dev;
        u64 hash_flds;
 +      bool symm;
        u32 hdrs;
  
        dev = ice_pf_to_dev(pf);
                return;
        }
  
 -      hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs);
 +      hash_flds = ice_get_rss_cfg(&pf->hw, vsi->idx, hdrs, &symm);
        if (hash_flds == ICE_HASH_INVALID) {
                dev_dbg(dev, "No hash fields found for the given header type, vsi num = %d\n",
                        vsi->vsi_num);
@@@ -3192,18 -3198,11 +3192,18 @@@ static u32 ice_get_rxfh_indir_size(stru
        return np->vsi->rss_table_size;
  }
  
 +/**
 + * ice_get_rxfh - get the Rx flow hash indirection table
 + * @netdev: network interface device structure
 + * @rxfh: pointer to param struct (indir, key, hfunc)
 + *
 + * Reads the indirection table directly from the hardware.
 + */
  static int
 -ice_get_rxfh_context(struct net_device *netdev, u32 *indir,
 -                   u8 *key, u8 *hfunc, u32 rss_context)
 +ice_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
  {
        struct ice_netdev_priv *np = netdev_priv(netdev);
 +      u32 rss_context = rxfh->rss_context;
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
        u16 qcount, offset;
                vsi = vsi->tc_map_vsi[rss_context];
        }
  
 -      if (hfunc)
 -              *hfunc = ETH_RSS_HASH_TOP;
 +      rxfh->hfunc = ETH_RSS_HASH_TOP;
 +      if (vsi->rss_hfunc == ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ)
 +              rxfh->input_xfrm |= RXH_XFRM_SYM_XOR;
  
 -      if (!indir)
 +      if (!rxfh->indir)
                return 0;
  
        lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
        if (!lut)
                return -ENOMEM;
  
 -      err = ice_get_rss_key(vsi, key);
 +      err = ice_get_rss_key(vsi, rxfh->key);
        if (err)
                goto out;
  
  
        if (ice_is_adq_active(pf)) {
                for (i = 0; i < vsi->rss_table_size; i++)
 -                      indir[i] = offset + lut[i] % qcount;
 +                      rxfh->indir[i] = offset + lut[i] % qcount;
                goto out;
        }
  
        for (i = 0; i < vsi->rss_table_size; i++)
 -              indir[i] = lut[i];
 +              rxfh->indir[i] = lut[i];
  
  out:
        kfree(lut);
        return err;
  }
  
 -/**
 - * ice_get_rxfh - get the Rx flow hash indirection table
 - * @netdev: network interface device structure
 - * @indir: indirection table
 - * @key: hash key
 - * @hfunc: hash function
 - *
 - * Reads the indirection table directly from the hardware.
 - */
 -static int
 -ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
 -{
 -      return ice_get_rxfh_context(netdev, indir, key, hfunc, 0);
 -}
 -
  /**
   * ice_set_rxfh - set the Rx flow hash indirection table
   * @netdev: network interface device structure
 - * @indir: indirection table
 - * @key: hash key
 - * @hfunc: hash function
 + * @rxfh: pointer to param struct (indir, key, hfunc)
 + * @extack: extended ACK from the Netlink message
   *
   * Returns -EINVAL if the table specifies an invalid queue ID, otherwise
   * returns 0 after programming the table.
   */
  static int
 -ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
 -           const u8 hfunc)
 +ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
 +           struct netlink_ext_ack *extack)
  {
        struct ice_netdev_priv *np = netdev_priv(netdev);
 +      u8 hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
        struct ice_vsi *vsi = np->vsi;
        struct ice_pf *pf = vsi->back;
        struct device *dev;
        int err;
  
        dev = ice_pf_to_dev(pf);
 -      if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
 +      if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
 +          rxfh->hfunc != ETH_RSS_HASH_TOP)
 +              return -EOPNOTSUPP;
 +
 +      if (rxfh->rss_context)
                return -EOPNOTSUPP;
  
        if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
                return -EOPNOTSUPP;
        }
  
 -      if (key) {
 +      /* Update the VSI's hash function */
 +      if (rxfh->input_xfrm & RXH_XFRM_SYM_XOR)
 +              hfunc = ICE_AQ_VSI_Q_OPT_RSS_HASH_SYM_TPLZ;
 +
 +      err = ice_set_rss_hfunc(vsi, hfunc);
 +      if (err)
 +              return err;
 +
 +      if (rxfh->key) {
                if (!vsi->rss_hkey_user) {
                        vsi->rss_hkey_user =
                                devm_kzalloc(dev, ICE_VSIQF_HKEY_ARRAY_SIZE,
                        if (!vsi->rss_hkey_user)
                                return -ENOMEM;
                }
 -              memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
 +              memcpy(vsi->rss_hkey_user, rxfh->key,
 +                     ICE_VSIQF_HKEY_ARRAY_SIZE);
  
                err = ice_set_rss_key(vsi, vsi->rss_hkey_user);
                if (err)
        }
  
        /* Each 32 bits pointed by 'indir' is stored with a lut entry */
 -      if (indir) {
 +      if (rxfh->indir) {
                int i;
  
                for (i = 0; i < vsi->rss_table_size; i++)
 -                      vsi->rss_lut_user[i] = (u8)(indir[i]);
 +                      vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]);
        } else {
                ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
                                 vsi->rss_size);
@@@ -4220,11 -4220,9 +4220,11 @@@ ice_get_module_eeprom(struct net_devic
  }
  
  static const struct ethtool_ops ice_ethtool_ops = {
 +      .cap_rss_ctx_supported  = true,
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE |
                                     ETHTOOL_COALESCE_RX_USECS_HIGH,
 +      .cap_rss_sym_xor_supported = true,
        .get_link_ksettings     = ice_get_link_ksettings,
        .set_link_ksettings     = ice_set_link_ksettings,
        .get_drvinfo            = ice_get_drvinfo,
        .set_pauseparam         = ice_set_pauseparam,
        .get_rxfh_key_size      = ice_get_rxfh_key_size,
        .get_rxfh_indir_size    = ice_get_rxfh_indir_size,
 -      .get_rxfh_context       = ice_get_rxfh_context,
        .get_rxfh               = ice_get_rxfh,
        .set_rxfh               = ice_set_rxfh,
        .get_channels           = ice_get_channels,
index 711e4fb62cb7aeb09a1e1a7b9857d5a873f84929,1bad6e17f9befdcafe804c064b820ffeac22994a..5af45932f460b2d2ff92714e49598420c5cd28a1
@@@ -212,18 -212,11 +212,18 @@@ static void ice_vsi_set_num_qs(struct i
                                                 vsi->alloc_txq));
                break;
        case ICE_VSI_SWITCHDEV_CTRL:
 -              /* The number of queues for ctrl VSI is equal to number of VFs.
 +              /* The number of queues for ctrl VSI is equal to number of PRs
                 * Each ring is associated to the corresponding VF_PR netdev.
 +               * Tx and Rx rings are always equal
                 */
 -              vsi->alloc_txq = ice_get_num_vfs(pf);
 -              vsi->alloc_rxq = vsi->alloc_txq;
 +              if (vsi->req_txq && vsi->req_rxq) {
 +                      vsi->alloc_txq = vsi->req_txq;
 +                      vsi->alloc_rxq = vsi->req_rxq;
 +              } else {
 +                      vsi->alloc_txq = 1;
 +                      vsi->alloc_rxq = 1;
 +              }
 +
                vsi->num_q_vectors = 1;
                break;
        case ICE_VSI_VF:
@@@ -526,14 -519,16 +526,14 @@@ static irqreturn_t ice_eswitch_msix_cle
  {
        struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
        struct ice_pf *pf = q_vector->vsi->back;
 -      struct ice_vf *vf;
 -      unsigned int bkt;
 +      struct ice_repr *repr;
 +      unsigned long id;
  
        if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
                return IRQ_HANDLED;
  
 -      rcu_read_lock();
 -      ice_for_each_vf_rcu(pf, bkt, vf)
 -              napi_schedule(&vf->repr->q_vector->napi);
 -      rcu_read_unlock();
 +      xa_for_each(&pf->eswitch.reprs, id, repr)
 +              napi_schedule(&repr->q_vector->napi);
  
        return IRQ_HANDLED;
  }
@@@ -1191,10 -1186,12 +1191,10 @@@ static void ice_set_rss_vsi_ctx(struct 
        case ICE_VSI_PF:
                /* PF VSI will inherit RSS instance of PF */
                lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
 -              hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
                break;
        case ICE_VSI_VF:
                /* VF VSI will gets a small RSS table which is a VSI LUT type */
                lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
 -              hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
                break;
        default:
                dev_dbg(dev, "Unsupported VSI type %s\n",
                return;
        }
  
 -      ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
 -                              ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
 -                              (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
 +      hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
 +      vsi->rss_hfunc = hash_type;
 +
 +      ctxt->info.q_opt_rss =
 +              FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_LUT_M, lut_type) |
 +              FIELD_PREP(ICE_AQ_VSI_Q_OPT_RSS_HASH_M, hash_type);
  }
  
  static void
@@@ -1606,44 -1600,12 +1606,44 @@@ static void ice_vsi_set_vf_rss_flow_fld
                return;
        }
  
 -      status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
 +      status = ice_add_avf_rss_cfg(&pf->hw, vsi, ICE_DEFAULT_RSS_HENA);
        if (status)
                dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
                        vsi->vsi_num, status);
  }
  
 +static const struct ice_rss_hash_cfg default_rss_cfgs[] = {
 +      /* configure RSS for IPv4 with input set IP src/dst */
 +      {ICE_FLOW_SEG_HDR_IPV4, ICE_FLOW_HASH_IPV4, ICE_RSS_ANY_HEADERS, false},
 +      /* configure RSS for IPv6 with input set IPv6 src/dst */
 +      {ICE_FLOW_SEG_HDR_IPV6, ICE_FLOW_HASH_IPV6, ICE_RSS_ANY_HEADERS, false},
 +      /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
 +      {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4,
 +                              ICE_HASH_TCP_IPV4,  ICE_RSS_ANY_HEADERS, false},
 +      /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
 +      {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4,
 +                              ICE_HASH_UDP_IPV4,  ICE_RSS_ANY_HEADERS, false},
 +      /* configure RSS for sctp4 with input set IP src/dst - only support
 +       * RSS on SCTPv4 on outer headers (non-tunneled)
 +       */
 +      {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4,
 +              ICE_HASH_SCTP_IPV4, ICE_RSS_OUTER_HEADERS, false},
 +      /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
 +      {ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6,
 +                              ICE_HASH_TCP_IPV6,  ICE_RSS_ANY_HEADERS, false},
 +      /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
 +      {ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6,
 +                              ICE_HASH_UDP_IPV6,  ICE_RSS_ANY_HEADERS, false},
 +      /* configure RSS for sctp6 with input set IPv6 src/dst - only support
 +       * RSS on SCTPv6 on outer headers (non-tunneled)
 +       */
 +      {ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6,
 +              ICE_HASH_SCTP_IPV6, ICE_RSS_OUTER_HEADERS, false},
 +      /* configure RSS for IPSEC ESP SPI with input set MAC_IPV4_SPI */
 +      {ICE_FLOW_SEG_HDR_ESP,
 +              ICE_FLOW_HASH_ESP_SPI, ICE_RSS_OUTER_HEADERS, false},
 +};
 +
  /**
   * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
   * @vsi: VSI to be configured
   */
  static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
  {
 -      u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
 +      u16 vsi_num = vsi->vsi_num;
        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;
        struct device *dev;
        int status;
 +      u32 i;
  
        dev = ice_pf_to_dev(pf);
        if (ice_is_safe_mode(pf)) {
                        vsi_num);
                return;
        }
 -      /* configure RSS for IPv4 with input set IP src/dst */
 -      status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
 -                               ICE_FLOW_SEG_HDR_IPV4);
 -      if (status)
 -              dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
 -                      vsi_num, status);
 -
 -      /* configure RSS for IPv6 with input set IPv6 src/dst */
 -      status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
 -                               ICE_FLOW_SEG_HDR_IPV6);
 -      if (status)
 -              dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
 -                      vsi_num, status);
 -
 -      /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
 -      status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
 -                               ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
 -      if (status)
 -              dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
 -                      vsi_num, status);
 +      for (i = 0; i < ARRAY_SIZE(default_rss_cfgs); i++) {
 +              const struct ice_rss_hash_cfg *cfg = &default_rss_cfgs[i];
  
 -      /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
 -      status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
 -                               ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
 -      if (status)
 -              dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
 -                      vsi_num, status);
 -
 -      /* configure RSS for sctp4 with input set IP src/dst */
 -      status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
 -                               ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
 -      if (status)
 -              dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
 -                      vsi_num, status);
 -
 -      /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
 -      status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
 -                               ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
 -      if (status)
 -              dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
 -                      vsi_num, status);
 -
 -      /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
 -      status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
 -                               ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
 -      if (status)
 -              dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
 -                      vsi_num, status);
 -
 -      /* configure RSS for sctp6 with input set IPv6 src/dst */
 -      status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
 -                               ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
 -      if (status)
 -              dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
 -                      vsi_num, status);
 -
 -      status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
 -                               ICE_FLOW_SEG_HDR_ESP);
 -      if (status)
 -              dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
 -                      vsi_num, status);
 +              status = ice_add_rss_cfg(hw, vsi, cfg);
 +              if (status)
 +                      dev_dbg(dev, "ice_add_rss_cfg failed, addl_hdrs = %x, hash_flds = %llx, hdr_type = %d, symm = %d\n",
 +                              cfg->addl_hdrs, cfg->hash_flds,
 +                              cfg->hdr_type, cfg->symm);
 +      }
  }
  
  /**
@@@ -2358,6 -2371,9 +2358,9 @@@ static int ice_vsi_cfg_tc_lan(struct ic
                } else {
                        max_txqs[i] = vsi->alloc_txq;
                }
+               if (vsi->type == ICE_VSI_PF)
+                       max_txqs[i] += vsi->num_xdp_txq;
        }
  
        dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
@@@ -2434,10 -2450,6 +2437,10 @@@ ice_vsi_cfg_def(struct ice_vsi *vsi, st
                        goto unroll_vector_base;
  
                ice_vsi_map_rings_to_vectors(vsi);
 +
 +              /* Associate q_vector rings to napi */
 +              ice_vsi_set_napi_queues(vsi, true);
 +
                vsi->stat_offsets_loaded = false;
  
                if (ice_is_xdp_ena_vsi(vsi)) {
@@@ -2611,10 -2623,6 +2614,6 @@@ void ice_vsi_decfg(struct ice_vsi *vsi
        if (vsi->type == ICE_VSI_VF &&
            vsi->agg_node && vsi->agg_node->valid)
                vsi->agg_node->num_vsis--;
-       if (vsi->agg_node) {
-               vsi->agg_node->valid = false;
-               vsi->agg_node->agg_id = 0;
-       }
  }
  
  /**
@@@ -2917,71 -2925,6 +2916,71 @@@ void ice_vsi_dis_irq(struct ice_vsi *vs
                synchronize_irq(vsi->q_vectors[i]->irq.virq);
  }
  
 +/**
 + * ice_queue_set_napi - Set the napi instance for the queue
 + * @dev: device to which NAPI and queue belong
 + * @queue_index: Index of queue
 + * @type: queue type as RX or TX
 + * @napi: NAPI context
 + * @locked: is the rtnl_lock already held
 + *
 + * Set the napi instance for the queue
 + */
 +static void
 +ice_queue_set_napi(struct net_device *dev, unsigned int queue_index,
 +                 enum netdev_queue_type type, struct napi_struct *napi,
 +                 bool locked)
 +{
 +      if (!locked)
 +              rtnl_lock();
 +      netif_queue_set_napi(dev, queue_index, type, napi);
 +      if (!locked)
 +              rtnl_unlock();
 +}
 +
 +/**
 + * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi
 + * @q_vector: q_vector pointer
 + * @locked: is the rtnl_lock already held
 + *
 + * Associate the q_vector napi with all the queue[s] on the vector
 + */
 +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked)
 +{
 +      struct ice_rx_ring *rx_ring;
 +      struct ice_tx_ring *tx_ring;
 +
 +      ice_for_each_rx_ring(rx_ring, q_vector->rx)
 +              ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index,
 +                                 NETDEV_QUEUE_TYPE_RX, &q_vector->napi,
 +                                 locked);
 +
 +      ice_for_each_tx_ring(tx_ring, q_vector->tx)
 +              ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index,
 +                                 NETDEV_QUEUE_TYPE_TX, &q_vector->napi,
 +                                 locked);
 +      /* Also set the interrupt number for the NAPI */
 +      netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq);
 +}
 +
 +/**
 + * ice_vsi_set_napi_queues
 + * @vsi: VSI pointer
 + * @locked: is the rtnl_lock already held
 + *
 + * Associate queue[s] with napi for all vectors
 + */
 +void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked)
 +{
 +      int i;
 +
 +      if (!vsi->netdev)
 +              return;
 +
 +      ice_for_each_q_vector(vsi, i)
 +              ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked);
 +}
 +
  /**
   * ice_vsi_release - Delete a VSI and free its resources
   * @vsi: the VSI being removed
@@@ -3127,26 -3070,27 +3126,26 @@@ ice_vsi_rebuild_set_coalesce(struct ice
  }
  
  /**
 - * ice_vsi_realloc_stat_arrays - Frees unused stat structures
 + * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones
   * @vsi: VSI pointer
 - * @prev_txq: Number of Tx rings before ring reallocation
 - * @prev_rxq: Number of Rx rings before ring reallocation
   */
 -static void
 -ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi, int prev_txq, int prev_rxq)
 +static int
 +ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
  {
 +      u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
 +      u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
 +      struct ice_ring_stats **tx_ring_stats;
 +      struct ice_ring_stats **rx_ring_stats;
        struct ice_vsi_stats *vsi_stat;
        struct ice_pf *pf = vsi->back;
 +      u16 prev_txq = vsi->alloc_txq;
 +      u16 prev_rxq = vsi->alloc_rxq;
        int i;
  
 -      if (!prev_txq || !prev_rxq)
 -              return;
 -      if (vsi->type == ICE_VSI_CHNL)
 -              return;
 -
        vsi_stat = pf->vsi_stats[vsi->idx];
  
 -      if (vsi->num_txq < prev_txq) {
 -              for (i = vsi->num_txq; i < prev_txq; i++) {
 +      if (req_txq < prev_txq) {
 +              for (i = req_txq; i < prev_txq; i++) {
                        if (vsi_stat->tx_ring_stats[i]) {
                                kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
                                WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
                }
        }
  
 -      if (vsi->num_rxq < prev_rxq) {
 -              for (i = vsi->num_rxq; i < prev_rxq; i++) {
 +      tx_ring_stats = vsi_stat->rx_ring_stats;
 +      vsi_stat->tx_ring_stats =
 +              krealloc_array(vsi_stat->tx_ring_stats, req_txq,
 +                             sizeof(*vsi_stat->tx_ring_stats),
 +                             GFP_KERNEL | __GFP_ZERO);
 +      if (!vsi_stat->tx_ring_stats) {
 +              vsi_stat->tx_ring_stats = tx_ring_stats;
 +              return -ENOMEM;
 +      }
 +
 +      if (req_rxq < prev_rxq) {
 +              for (i = req_rxq; i < prev_rxq; i++) {
                        if (vsi_stat->rx_ring_stats[i]) {
                                kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
                                WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
                        }
                }
        }
 +
 +      rx_ring_stats = vsi_stat->rx_ring_stats;
 +      vsi_stat->rx_ring_stats =
 +              krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
 +                             sizeof(*vsi_stat->rx_ring_stats),
 +                             GFP_KERNEL | __GFP_ZERO);
 +      if (!vsi_stat->rx_ring_stats) {
 +              vsi_stat->rx_ring_stats = rx_ring_stats;
 +              return -ENOMEM;
 +      }
 +
 +      return 0;
  }
  
  /**
@@@ -3200,9 -3122,9 +3199,9 @@@ int ice_vsi_rebuild(struct ice_vsi *vsi
  {
        struct ice_vsi_cfg_params params = {};
        struct ice_coalesce_stored *coalesce;
 -      int ret, prev_txq, prev_rxq;
        int prev_num_q_vectors = 0;
        struct ice_pf *pf;
 +      int ret;
  
        if (!vsi)
                return -EINVAL;
  
        prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
  
 -      prev_txq = vsi->num_txq;
 -      prev_rxq = vsi->num_rxq;
 +      ret = ice_vsi_realloc_stat_arrays(vsi);
 +      if (ret)
 +              goto err_vsi_cfg;
  
        ice_vsi_decfg(vsi);
        ret = ice_vsi_cfg_def(vsi, &params);
                return ice_schedule_reset(pf, ICE_RESET_PFR);
        }
  
 -      ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
 -
        ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
        kfree(coalesce);
  
index 7ffbd4fca881f6476f93b3d61c93d7cc5008428a,ae44ad5f8ce8a1e9894f6a2175f0c92642979389..d58b07e7e123994d5c82ef5fb08e2744eb531ab3
@@@ -142,8 -142,7 +142,8 @@@ mtk_wed_wo_queue_refill(struct mtk_wed_
                dma_addr_t addr;
                void *buf;
  
 -              buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
 +              buf = page_frag_alloc(&q->cache, q->buf_size,
 +                                    GFP_ATOMIC | GFP_DMA32);
                if (!buf)
                        break;
  
@@@ -292,6 -291,9 +292,9 @@@ mtk_wed_wo_queue_tx_clean(struct mtk_we
        for (i = 0; i < q->n_desc; i++) {
                struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
  
+               if (!entry->buf)
+                       continue;
                dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
                                 DMA_TO_DEVICE);
                skb_free_frag(entry->buf);
index 9e695ed122ee21c883587b78e9c3d947fc9ab212,13c7ed1bb37e9363721726f3881dfc96cbc9b9fc..82b5ca1be4f3955e16df633fb7442901f5cf66b9
@@@ -103,7 -103,7 +103,7 @@@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq 
                xdptxd->dma_addr = dma_addr;
  
                if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
 -                                            mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
 +                                            mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
                        return false;
  
                /* xmit_mode == MLX5E_XDP_XMIT_MODE_FRAME */
        xdptxd->dma_addr = dma_addr;
  
        if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
 -                                    mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
 +                                    mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL)))
                return false;
  
        /* xmit_mode == MLX5E_XDP_XMIT_MODE_PAGE */
@@@ -256,55 -256,9 +256,55 @@@ static int mlx5e_xdp_rx_hash(const stru
        return 0;
  }
  
 +static int mlx5e_xdp_rx_vlan_tag(const struct xdp_md *ctx, __be16 *vlan_proto,
 +                               u16 *vlan_tci)
 +{
 +      const struct mlx5e_xdp_buff *_ctx = (void *)ctx;
 +      const struct mlx5_cqe64 *cqe = _ctx->cqe;
 +
 +      if (!cqe_has_vlan(cqe))
 +              return -ENODATA;
 +
 +      *vlan_proto = htons(ETH_P_8021Q);
 +      *vlan_tci = be16_to_cpu(cqe->vlan_info);
 +      return 0;
 +}
 +
  const struct xdp_metadata_ops mlx5e_xdp_metadata_ops = {
        .xmo_rx_timestamp               = mlx5e_xdp_rx_timestamp,
        .xmo_rx_hash                    = mlx5e_xdp_rx_hash,
 +      .xmo_rx_vlan_tag                = mlx5e_xdp_rx_vlan_tag,
 +};
 +
 +struct mlx5e_xsk_tx_complete {
 +      struct mlx5_cqe64 *cqe;
 +      struct mlx5e_cq *cq;
 +};
 +
 +static u64 mlx5e_xsk_fill_timestamp(void *_priv)
 +{
 +      struct mlx5e_xsk_tx_complete *priv = _priv;
 +      u64 ts;
 +
 +      ts = get_cqe_ts(priv->cqe);
 +
 +      if (mlx5_is_real_time_rq(priv->cq->mdev) || mlx5_is_real_time_sq(priv->cq->mdev))
 +              return mlx5_real_time_cyc2time(&priv->cq->mdev->clock, ts);
 +
 +      return  mlx5_timecounter_cyc2time(&priv->cq->mdev->clock, ts);
 +}
 +
 +static void mlx5e_xsk_request_checksum(u16 csum_start, u16 csum_offset, void *priv)
 +{
 +      struct mlx5_wqe_eth_seg *eseg = priv;
 +
 +      /* HW/FW is doing parsing, so offsets are largely ignored. */
 +      eseg->cs_flags |= MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
 +}
 +
 +const struct xsk_tx_metadata_ops mlx5e_xsk_tx_metadata_ops = {
 +      .tmo_fill_timestamp             = mlx5e_xsk_fill_timestamp,
 +      .tmo_request_checksum           = mlx5e_xsk_request_checksum,
  };
  
  /* returns true if packet was consumed by xdp */
@@@ -444,11 -398,11 +444,11 @@@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_
  
  INDIRECT_CALLABLE_SCOPE bool
  mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 -                   int check_result);
 +                   int check_result, struct xsk_tx_metadata *meta);
  
  INDIRECT_CALLABLE_SCOPE bool
  mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 -                         int check_result)
 +                         int check_result, struct xsk_tx_metadata *meta)
  {
        struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
        struct mlx5e_xdpsq_stats *stats = sq->stats;
                         */
                        if (unlikely(sq->mpwqe.wqe))
                                mlx5e_xdp_mpwqe_complete(sq);
 -                      return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
 +                      return mlx5e_xmit_xdp_frame(sq, xdptxd, 0, meta);
                }
                if (!xdptxd->len) {
                        skb_frag_t *frag = &xdptxdf->sinfo->frags[0];
                 * and it's safe to complete it at any time.
                 */
                mlx5e_xdp_mpwqe_session_start(sq);
 +              xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, &session->wqe->eth);
        }
  
        mlx5e_xdp_mpwqe_add_dseg(sq, p, stats);
@@@ -527,7 -480,7 +527,7 @@@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_
  
  INDIRECT_CALLABLE_SCOPE bool
  mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
 -                   int check_result)
 +                   int check_result, struct xsk_tx_metadata *meta)
  {
        struct mlx5e_xmit_data_frags *xdptxdf =
                container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
        dma_addr_t dma_addr = xdptxd->dma_addr;
        u32 dma_len = xdptxd->len;
        u16 ds_cnt, inline_hdr_sz;
+       unsigned int frags_size;
        u8 num_wqebbs = 1;
        int num_frags = 0;
        bool inline_ok;
  
        inline_ok = sq->min_inline_mode == MLX5_INLINE_MODE_NONE ||
                dma_len >= MLX5E_XDP_MIN_INLINE;
+       frags_size = xdptxd->has_frags ? xdptxdf->sinfo->xdp_frags_size : 0;
  
-       if (unlikely(!inline_ok || sq->hw_mtu < dma_len)) {
+       if (unlikely(!inline_ok || sq->hw_mtu < dma_len + frags_size)) {
                stats->err++;
                return false;
        }
                sq->pc++;
        }
  
 +      xsk_tx_metadata_request(meta, &mlx5e_xsk_tx_metadata_ops, eseg);
 +
        sq->doorbell_cseg = cseg;
  
        stats->xmit++;
  static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
                                  struct mlx5e_xdp_wqe_info *wi,
                                  u32 *xsk_frames,
 -                                struct xdp_frame_bulk *bq)
 +                                struct xdp_frame_bulk *bq,
 +                                struct mlx5e_cq *cq,
 +                                struct mlx5_cqe64 *cqe)
  {
        struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
        u16 i;
  
                        break;
                }
 -              case MLX5E_XDP_XMIT_MODE_XSK:
 +              case MLX5E_XDP_XMIT_MODE_XSK: {
                        /* AF_XDP send */
 +                      struct xsk_tx_metadata_compl *compl = NULL;
 +                      struct mlx5e_xsk_tx_complete priv = {
 +                              .cqe = cqe,
 +                              .cq = cq,
 +                      };
 +
 +                      if (xp_tx_metadata_enabled(sq->xsk_pool)) {
 +                              xdpi = mlx5e_xdpi_fifo_pop(xdpi_fifo);
 +                              compl = &xdpi.xsk_meta;
 +
 +                              xsk_tx_metadata_complete(compl, &mlx5e_xsk_tx_metadata_ops, &priv);
 +                      }
 +
                        (*xsk_frames)++;
                        break;
 +              }
                default:
                        WARN_ON_ONCE(true);
                }
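
The hunks above thread AF_XDP TX metadata through the send and completion paths: a request is recorded against the WQE at submit time and completed from the CQE when the descriptor is reaped. The sketch below models only that request/complete split in plain C; every type and helper in it is a hypothetical stand-in for the xsk_tx_metadata_* machinery, not the real API.

/*
 * User-space model of the request/complete split: at submit time the
 * metadata request is recorded against the hardware descriptor, at
 * completion time the result (here a TX timestamp) is copied back.
 */
#include <stdint.h>
#include <stdio.h>

struct meta_request { int want_timestamp; };
struct meta_completion { uint64_t tx_timestamp; };
struct fake_cqe { uint64_t hw_timestamp; };

static void metadata_request(const struct meta_request *req, int *desc_flags)
{
	if (req && req->want_timestamp)
		*desc_flags |= 1;	/* ask HW to stamp this descriptor */
}

static void metadata_complete(struct meta_completion *compl,
			      const struct fake_cqe *cqe)
{
	compl->tx_timestamp = cqe->hw_timestamp;	/* surface result */
}

int main(void)
{
	struct meta_request req = { .want_timestamp = 1 };
	struct meta_completion compl = { 0 };
	struct fake_cqe cqe = { .hw_timestamp = 123456789ULL };
	int desc_flags = 0;

	metadata_request(&req, &desc_flags);	/* submission path */
	metadata_complete(&compl, &cqe);	/* completion path */
	printf("flags=%d ts=%llu\n", desc_flags,
	       (unsigned long long)compl.tx_timestamp);
	return 0;
}
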
@@@ -785,7 -722,7 +787,7 @@@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_c
  
                        sqcc += wi->num_wqebbs;
  
 -                      mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
 +                      mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, cq, cqe);
                } while (!last_wqe);
  
                if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
@@@ -832,7 -769,7 +834,7 @@@ void mlx5e_free_xdpsq_descs(struct mlx5
  
                sq->cc += wi->num_wqebbs;
  
 -              mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq);
 +              mlx5e_free_xdpsq_desc(sq, wi, &xsk_frames, &bq, NULL, NULL);
        }
  
        xdp_flush_frame_bulk(&bq);
@@@ -905,7 -842,7 +907,7 @@@ int mlx5e_xdp_xmit(struct net_device *d
                }
  
                ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
 -                                    mlx5e_xmit_xdp_frame, sq, xdptxd, 0);
 +                                    mlx5e_xmit_xdp_frame, sq, xdptxd, 0, NULL);
                if (unlikely(!ret)) {
                        int j;
  
index 0fe7ea88d56735b8c21b8cb2f35e111d60375ed2,c7c1b667b105a082148b5d8e99bcf3dde042475d..cc51ce16df14abe530910e063b9072c9e23ff49c
@@@ -49,7 -49,7 +49,7 @@@ void mlx5e_ethtool_get_drvinfo(struct m
        count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                         "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
                         fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
-       if (count == sizeof(drvinfo->fw_version))
+       if (count >= sizeof(drvinfo->fw_version))
                snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                         "%d.%d.%04d", fw_rev_maj(mdev),
                         fw_rev_min(mdev), fw_rev_sub(mdev));
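
The drvinfo fix above (repeated later for the representor) relies on snprintf() returning the length the output would have had: any return value greater than or equal to the buffer size means the firmware string was truncated, so checking for equality alone misses most truncations. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	char buf[16];
	int n = snprintf(buf, sizeof(buf), "%d.%d.%04d (%.16s)",
			 22, 36, 1010, "MT_0000000123");

	if (n >= (int)sizeof(buf))	/* catches every truncated case */
		snprintf(buf, sizeof(buf), "%d.%d.%04d", 22, 36, 1010);

	printf("fw_version: %s (wanted %d bytes)\n", buf, n);
	return 0;
}
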
@@@ -1262,29 -1262,27 +1262,29 @@@ static u32 mlx5e_get_rxfh_indir_size(st
        return mlx5e_ethtool_get_rxfh_indir_size(priv);
  }
  
 -static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir,
 -                                u8 *key, u8 *hfunc, u32 rss_context)
 +int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh)
  {
 -      struct mlx5e_priv *priv = netdev_priv(dev);
 +      struct mlx5e_priv *priv = netdev_priv(netdev);
 +      u32 rss_context = rxfh->rss_context;
        int err;
  
        mutex_lock(&priv->state_lock);
 -      err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc);
 +      err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context,
 +                                      rxfh->indir, rxfh->key, &rxfh->hfunc);
        mutex_unlock(&priv->state_lock);
        return err;
  }
  
 -static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir,
 -                                const u8 *key, const u8 hfunc,
 -                                u32 *rss_context, bool delete)
 +int mlx5e_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh,
 +                 struct netlink_ext_ack *extack)
  {
        struct mlx5e_priv *priv = netdev_priv(dev);
 +      u32 *rss_context = &rxfh->rss_context;
 +      u8 hfunc = rxfh->hfunc;
        int err;
  
        mutex_lock(&priv->state_lock);
 -      if (delete) {
 +      if (*rss_context && rxfh->rss_delete) {
                err = mlx5e_rx_res_rss_destroy(priv->rx_res, *rss_context);
                goto unlock;
        }
                        goto unlock;
        }
  
 -      err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key,
 +      err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context,
 +                                      rxfh->indir, rxfh->key,
                                        hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
  
  unlock:
        return err;
  }
  
 -int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 -                 u8 *hfunc)
 -{
 -      return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0);
 -}
 -
 -int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
 -                 const u8 *key, const u8 hfunc)
 -{
 -      struct mlx5e_priv *priv = netdev_priv(dev);
 -      int err;
 -
 -      mutex_lock(&priv->state_lock);
 -      err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key,
 -                                      hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc);
 -      mutex_unlock(&priv->state_lock);
 -      return err;
 -}
 -
  #define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC               100
  #define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC                8000
  #define MLX5E_PFC_PREVEN_MINOR_PRECENT                85
@@@ -2382,7 -2398,6 +2382,7 @@@ static void mlx5e_get_rmon_stats(struc
  }
  
  const struct ethtool_ops mlx5e_ethtool_ops = {
 +      .cap_rss_ctx_supported  = true,
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE |
        .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
        .get_rxfh          = mlx5e_get_rxfh,
        .set_rxfh          = mlx5e_set_rxfh,
 -      .get_rxfh_context  = mlx5e_get_rxfh_context,
 -      .set_rxfh_context  = mlx5e_set_rxfh_context,
        .get_rxnfc         = mlx5e_get_rxnfc,
        .set_rxnfc         = mlx5e_set_rxnfc,
        .get_tunable       = mlx5e_get_tunable,
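
With the ethtool hunks above, RSS context handling moves into the regular get_rxfh/set_rxfh callbacks, which now receive one parameter block carrying the indirection table, key, hash function, context id and delete flag, advertised via cap_rss_ctx_supported. The sketch below models that consolidated shape with a simplified stand-in struct, not the real ethtool_rxfh_param definition.

#include <stdint.h>
#include <stdio.h>

struct rxfh_param {
	uint32_t rss_context;	/* 0 = default context */
	uint8_t  rss_delete;	/* destroy the named context */
	uint8_t  hfunc;
	uint32_t indir[8];
	uint8_t  key[16];
};

static int set_rxfh(struct rxfh_param *p)
{
	if (p->rss_context && p->rss_delete) {
		printf("destroy context %u\n", p->rss_context);
		return 0;
	}
	printf("program context %u, hfunc %u\n", p->rss_context, p->hfunc);
	return 0;
}

int main(void)
{
	struct rxfh_param p = { .rss_context = 3, .rss_delete = 1 };

	return set_rxfh(&p);
}
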
index ddcc2f6a11c2fcf868306afdaecd861ff02d63a5,e92d4f83592ed94fefd028731a2ab71617f4c1a1..05527418fa642f6dcd73e424f3a031ce0ec0c6f8
@@@ -78,7 -78,7 +78,7 @@@ static void mlx5e_rep_get_drvinfo(struc
        count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                         "%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
                         fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
-       if (count == sizeof(drvinfo->fw_version))
+       if (count >= sizeof(drvinfo->fw_version))
                snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
                         "%d.%d.%04d", fw_rev_maj(mdev),
                         fw_rev_min(mdev), fw_rev_sub(mdev));
@@@ -112,18 -112,8 +112,18 @@@ static const struct counter_desc vport_
                             tx_vport_rdma_multicast_bytes) },
  };
  
 +static const struct counter_desc vport_rep_loopback_stats_desc[] = {
 +      { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
 +                           vport_loopback_packets) },
 +      { MLX5E_DECLARE_STAT(struct mlx5e_rep_stats,
 +                           vport_loopback_bytes) },
 +};
 +
  #define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
  #define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)
 +#define NUM_VPORT_REP_LOOPBACK_COUNTERS(dev) \
 +      (MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
 +       ARRAY_SIZE(vport_rep_loopback_stats_desc) : 0)
  
  static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(sw_rep)
  {
@@@ -167,8 -157,7 +167,8 @@@ static MLX5E_DECLARE_STATS_GRP_OP_UPDAT
  
  static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport_rep)
  {
 -      return NUM_VPORT_REP_HW_COUNTERS;
 +      return NUM_VPORT_REP_HW_COUNTERS +
 +             NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev);
  }
  
  static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport_rep)
  
        for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
                strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_rep_stats_desc[i].format);
 +      for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
 +              strcpy(data + (idx++) * ETH_GSTRING_LEN,
 +                     vport_rep_loopback_stats_desc[i].format);
        return idx;
  }
  
@@@ -190,9 -176,6 +190,9 @@@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_
        for (i = 0; i < NUM_VPORT_REP_HW_COUNTERS; i++)
                data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
                                                   vport_rep_stats_desc, i);
 +      for (i = 0; i < NUM_VPORT_REP_LOOPBACK_COUNTERS(priv->mdev); i++)
 +              data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.rep_stats,
 +                                                 vport_rep_loopback_stats_desc, i);
        return idx;
  }
  
@@@ -264,13 -247,6 +264,13 @@@ static MLX5E_DECLARE_STATS_GRP_OP_UPDAT
        rep_stats->tx_vport_rdma_multicast_bytes =
                MLX5_GET_CTR(out, received_ib_multicast.octets);
  
 +      if (MLX5_CAP_GEN(priv->mdev, vport_counter_local_loopback)) {
 +              rep_stats->vport_loopback_packets =
 +                      MLX5_GET_CTR(out, local_loopback.packets);
 +              rep_stats->vport_loopback_bytes =
 +                      MLX5_GET_CTR(out, local_loopback.octets);
 +      }
 +
  out:
        kvfree(out);
  }
@@@ -1180,6 -1156,12 +1180,6 @@@ static int mlx5e_init_rep_tx(struct mlx
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;
  
 -      err = mlx5e_create_tises(priv);
 -      if (err) {
 -              mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
 -              return err;
 -      }
 -
        err = mlx5e_rep_neigh_init(rpriv);
        if (err)
                goto err_neigh_init;
@@@ -1202,6 -1184,7 +1202,6 @@@ err_ht_init
  err_init_tx:
        mlx5e_rep_neigh_cleanup(rpriv);
  err_neigh_init:
 -      mlx5e_destroy_tises(priv);
        return err;
  }
  
@@@ -1215,6 -1198,7 +1215,6 @@@ static void mlx5e_cleanup_rep_tx(struc
                mlx5e_cleanup_uplink_rep_tx(rpriv);
  
        mlx5e_rep_neigh_cleanup(rpriv);
 -      mlx5e_destroy_tises(priv);
  }
  
  static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@@ -1444,7 -1428,7 +1444,7 @@@ static const struct mlx5e_profile mlx5e
        .update_stats           = mlx5e_stats_update_ndo_stats,
        .update_carrier         = mlx5e_update_carrier,
        .rx_handlers            = &mlx5e_rx_handlers_rep,
 -      .max_tc                 = MLX5E_MAX_NUM_TC,
 +      .max_tc                 = MLX5_MAX_NUM_TC,
        .stats_grps             = mlx5e_ul_rep_stats_grps,
        .stats_grps_num         = mlx5e_ul_rep_stats_grps_num,
  };
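
The representor stats hunks above size, name and read the loopback counters only when the device advertises vport_counter_local_loopback, so the string and value layout stays unchanged on firmware without the capability. A reduced model of that capability-gated sizing:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const base_counters[] = { "rx_packets", "rx_bytes" };
static const char * const loopback_counters[] = {
	"vport_loopback_packets", "vport_loopback_bytes"
};

static unsigned int num_counters(int has_loopback_cap)
{
	return ARRAY_SIZE(base_counters) +
	       (has_loopback_cap ? ARRAY_SIZE(loopback_counters) : 0);
}

int main(void)
{
	printf("without cap: %u counters\n", num_counters(0));
	printf("with cap:    %u counters\n", num_counters(1));
	return 0;
}
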
index 85cdba226eaca63a7e528b8bee94b6d1c13aa47a,96af9e2ab1d87d7305d16141f7ee45dd8c52450e..30932c9c9a8f08bca2c8025f0a5685f79695e54d
@@@ -3209,10 -3209,10 +3209,10 @@@ static int offload_pedit_fields(struct 
        headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
        headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
  
 -      set_masks = &hdrs[0].masks;
 -      add_masks = &hdrs[1].masks;
 -      set_vals = &hdrs[0].vals;
 -      add_vals = &hdrs[1].vals;
 +      set_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].masks;
 +      add_masks = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].masks;
 +      set_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_SET].vals;
 +      add_vals = &hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].vals;
  
        for (i = 0; i < ARRAY_SIZE(fields); i++) {
                bool skip;
@@@ -3778,7 -3778,8 +3778,8 @@@ alloc_branch_attr(struct mlx5e_tc_flow 
                break;
        case FLOW_ACTION_ACCEPT:
        case FLOW_ACTION_PIPE:
-               if (set_branch_dest_ft(flow->priv, attr))
+               err = set_branch_dest_ft(flow->priv, attr);
+               if (err)
                        goto out_err;
                break;
        case FLOW_ACTION_JUMP:
                        goto out_err;
                }
                *jump_count = cond->extval;
-               if (set_branch_dest_ft(flow->priv, attr))
+               err = set_branch_dest_ft(flow->priv, attr);
+               if (err)
                        goto out_err;
                break;
        default:
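
Both fixes in this file address the same pattern: a helper's return code was tested but never stored, so the error path could return a stale (usually zero) err, and an ERR_PTR result was not converted with PTR_ERR. A minimal reproduction of the discarded-return-code variant, in plain C:

#include <stdio.h>

static int helper(void)
{
	return -22;		/* -EINVAL */
}

static int buggy(void)
{
	int err = 0;

	if (helper())		/* failure detected ... */
		goto out_err;
	return 0;
out_err:
	return err;		/* ... but 0 is returned */
}

static int fixed(void)
{
	int err = helper();	/* capture the code before branching */

	if (err)
		goto out_err;
	return 0;
out_err:
	return err;
}

int main(void)
{
	printf("buggy=%d fixed=%d\n", buggy(), fixed());
	return 0;
}
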
@@@ -5028,6 -5030,22 +5030,6 @@@ int mlx5e_tc_delete_matchall(struct mlx
        return apply_police_params(priv, 0, extack);
  }
  
 -void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
 -                           struct tc_cls_matchall_offload *ma)
 -{
 -      struct mlx5e_rep_priv *rpriv = priv->ppriv;
 -      struct rtnl_link_stats64 cur_stats;
 -      u64 dbytes;
 -      u64 dpkts;
 -
 -      mlx5e_stats_copy_rep_stats(&cur_stats, &priv->stats.rep_stats);
 -      dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
 -      dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
 -      rpriv->prev_vf_vport_stats = cur_stats;
 -      flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
 -                        FLOW_ACTION_HW_STATS_DELAYED);
 -}
 -
  static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
                                              struct mlx5e_priv *peer_priv)
  {
@@@ -5720,8 -5738,10 +5722,10 @@@ int mlx5e_tc_action_miss_mapping_get(st
  
        esw = priv->mdev->priv.eswitch;
        attr->act_id_restore_rule = esw_add_restore_rule(esw, *act_miss_mapping);
-       if (IS_ERR(attr->act_id_restore_rule))
+       if (IS_ERR(attr->act_id_restore_rule)) {
+               err = PTR_ERR(attr->act_id_restore_rule);
                goto err_rule;
+       }
  
        return 0;
  
index 0c52a9eff188dab05eaba0fa9eb0b3157a7aadda,a42df2c1bd043ccc0ac68566ed3b4d0ef592dc2b..3611ea64875effbcc76d83def3d798fc9f59c130
@@@ -654,7 -654,6 +654,7 @@@ struct phy_device *phy_device_create(st
        mdiodev->flags = MDIO_DEVICE_FLAG_PHY;
        mdiodev->device_free = phy_mdio_device_free;
        mdiodev->device_remove = phy_mdio_device_remove;
 +      mdiodev->reset_state = -1;
  
        dev->speed = SPEED_UNKNOWN;
        dev->duplex = DUPLEX_UNKNOWN;
@@@ -1236,19 -1235,18 +1236,19 @@@ int phy_init_hw(struct phy_device *phyd
  
        if (phydev->drv->soft_reset) {
                ret = phydev->drv->soft_reset(phydev);
 +              if (ret < 0)
 +                      return ret;
 +
                /* see comment in genphy_soft_reset for an explanation */
 -              if (!ret)
 -                      phydev->suspended = 0;
 +              phydev->suspended = 0;
        }
  
 -      if (ret < 0)
 -              return ret;
 -
        ret = phy_scan_fixups(phydev);
        if (ret < 0)
                return ret;
  
 +      phy_interface_zero(phydev->possible_interfaces);
 +
        if (phydev->drv->config_init) {
                ret = phydev->drv->config_init(phydev);
                if (ret < 0)
@@@ -1550,7 -1548,8 +1550,8 @@@ int phy_attach_direct(struct net_devic
                goto error;
  
        phy_resume(phydev);
-       phy_led_triggers_register(phydev);
+       if (!phydev->is_on_sfp_module)
+               phy_led_triggers_register(phydev);
  
        /**
         * If the external phy used by current mac interface is managed by
@@@ -1651,22 -1650,20 +1652,22 @@@ EXPORT_SYMBOL_GPL(phy_driver_is_genphy_
  /**
   * phy_package_join - join a common PHY group
   * @phydev: target phy_device struct
 - * @addr: cookie and PHY address for global register access
 + * @base_addr: cookie and base PHY address of PHY package for offset
 + *   calculation of global register access
   * @priv_size: if non-zero allocate this amount of bytes for private data
   *
   * This joins a PHY group and provides a shared storage for all phydevs in
   * this group. This is intended to be used for packages which contain
   * more than one PHY, for example a quad PHY transceiver.
   *
 - * The addr parameter serves as a cookie which has to have the same value
 - * for all members of one group and as a PHY address to access generic
 - * registers of a PHY package. Usually, one of the PHY addresses of the
 - * different PHYs in the package provides access to these global registers.
 + * The base_addr parameter serves as a cookie which has to have the same value
 + * for all members of one group and as the base PHY address of the PHY package
 + * for offset calculation to access generic registers of a PHY package.
 + * Usually, one of the PHY addresses of the different PHYs in the package
 + * provides access to these global registers.
   * The address which is given here, will be used in the phy_package_read()
 - * and phy_package_write() convenience functions. If your PHY doesn't have
 - * global registers you can just pick any of the PHY addresses.
 + * and phy_package_write() convenience functions as base and added to the
 + * passed offset in those functions.
   *
   * This will set the shared pointer of the phydev to the shared storage.
   * If this is the first call for a this cookie the shared storage will be
   * Returns < 1 on error, 0 on success. Esp. calling phy_package_join()
   * with the same cookie but a different priv_size is an error.
   */
 -int phy_package_join(struct phy_device *phydev, int addr, size_t priv_size)
 +int phy_package_join(struct phy_device *phydev, int base_addr, size_t priv_size)
  {
        struct mii_bus *bus = phydev->mdio.bus;
        struct phy_package_shared *shared;
        int ret;
  
 -      if (addr < 0 || addr >= PHY_MAX_ADDR)
 +      if (base_addr < 0 || base_addr >= PHY_MAX_ADDR)
                return -EINVAL;
  
        mutex_lock(&bus->shared_lock);
 -      shared = bus->shared[addr];
 +      shared = bus->shared[base_addr];
        if (!shared) {
                ret = -ENOMEM;
                shared = kzalloc(sizeof(*shared), GFP_KERNEL);
                                goto err_free;
                        shared->priv_size = priv_size;
                }
 -              shared->addr = addr;
 +              shared->base_addr = base_addr;
                refcount_set(&shared->refcnt, 1);
 -              bus->shared[addr] = shared;
 +              bus->shared[base_addr] = shared;
        } else {
                ret = -EINVAL;
                if (priv_size && priv_size != shared->priv_size)
@@@ -1738,7 -1735,7 +1739,7 @@@ void phy_package_leave(struct phy_devic
                return;
  
        if (refcount_dec_and_mutex_lock(&shared->refcnt, &bus->shared_lock)) {
 -              bus->shared[shared->addr] = NULL;
 +              bus->shared[shared->base_addr] = NULL;
                mutex_unlock(&bus->shared_lock);
                kfree(shared->priv);
                kfree(shared);
@@@ -1757,8 -1754,7 +1758,8 @@@ static void devm_phy_package_leave(stru
   * devm_phy_package_join - resource managed phy_package_join()
   * @dev: device that is registering this PHY package
   * @phydev: target phy_device struct
 - * @addr: cookie and PHY address for global register access
 + * @base_addr: cookie and base PHY address of PHY package for offset
 + *   calculation of global register access
   * @priv_size: if non-zero allocate this amount of bytes for private data
   *
   * Managed phy_package_join(). Shared storage fetched by this function,
   * phy_package_join() for more information.
   */
  int devm_phy_package_join(struct device *dev, struct phy_device *phydev,
 -                        int addr, size_t priv_size)
 +                        int base_addr, size_t priv_size)
  {
        struct phy_device **ptr;
        int ret;
        if (!ptr)
                return -ENOMEM;
  
 -      ret = phy_package_join(phydev, addr, priv_size);
 +      ret = phy_package_join(phydev, base_addr, priv_size);
  
        if (!ret) {
                *ptr = phydev;
@@@ -1822,7 -1818,8 +1823,8 @@@ void phy_detach(struct phy_device *phyd
        }
        phydev->phylink = NULL;
  
-       phy_led_triggers_unregister(phydev);
+       if (!phydev->is_on_sfp_module)
+               phy_led_triggers_unregister(phydev);
  
        if (phydev->mdio.dev.driver)
                module_put(phydev->mdio.dev.driver->owner);
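
The phy_package_join() rework above changes the meaning of the cookie from a fixed PHY address to a base address that phy_package_read()/phy_package_write() add the caller's offset to. The toy calculation below illustrates only that addressing scheme; PHY_MAX_ADDR is taken from the hunk, everything else is illustrative.

#include <stdio.h>

#define PHY_MAX_ADDR 32

static int package_phy_addr(int base_addr, int offset)
{
	int addr = base_addr + offset;

	return (addr >= 0 && addr < PHY_MAX_ADDR) ? addr : -1;
}

int main(void)
{
	/* e.g. a quad PHY at addresses 4..7: base_addr 4, offsets 0..3 */
	for (int off = 0; off < 4; off++)
		printf("offset %d -> MDIO address %d\n", off,
		       package_phy_addr(4, off));
	return 0;
}
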
index ab0c72c55b2dbd2b5bdffa4e6cb385d658acae35,bc6a9f861711fdf9b2b4c3f57c46d82902b522e3..0f405ded1a7aa06b9ae4ee09108be91365bd0781
@@@ -1351,7 -1351,8 +1351,7 @@@ static void iwl_pcie_rx_handle_rb(struc
                if (len < sizeof(*pkt) || offset > max_len)
                        break;
  
 -              trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
 -              trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
 +              maybe_trace_iwlwifi_dev_rx(trans, pkt, len);
  
                /* Reclaim a command buffer only if this packet is a response
                 *   to a (driver-originated) command.
                 * if it is true then one of the handlers took the page.
                 */
  
-               if (reclaim) {
+               if (reclaim && txq) {
                        u16 sequence = le16_to_cpu(pkt->hdr.sequence);
                        int index = SEQ_TO_INDEX(sequence);
                        int cmd_index = iwl_txq_get_cmd_index(txq, index);
index f39c436f0b6d2e41420ee93018a5f3ceee82eadb,92253260f56832678cc149f562c6b18910a20655..fc64e1e7f5eeb9885b667ee3ec7ea7470b72b44a
@@@ -2107,29 -2107,18 +2107,29 @@@ static void iwl_trans_pcie_removal_wk(s
                container_of(wk, struct iwl_trans_pcie_removal, work);
        struct pci_dev *pdev = removal->pdev;
        static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
 -      struct pci_bus *bus = pdev->bus;
 +      struct pci_bus *bus;
 +
 +      pci_lock_rescan_remove();
 +
 +      bus = pdev->bus;
 +      /* in this case, something else already removed the device */
 +      if (!bus)
 +              goto out;
  
        dev_err(&pdev->dev, "Device gone - attempting removal\n");
 +
        kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
 -      pci_lock_rescan_remove();
 -      pci_dev_put(pdev);
 +
        pci_stop_and_remove_bus_device(pdev);
 -      if (removal->rescan && bus) {
 +      pci_dev_put(pdev);
 +
 +      if (removal->rescan) {
                if (bus->parent)
                        bus = bus->parent;
                pci_rescan_bus(bus);
        }
 +
 +out:
        pci_unlock_rescan_remove();
  
        kfree(removal);
@@@ -2144,7 -2133,6 +2144,7 @@@ void iwl_trans_pcie_remove(struct iwl_t
                return;
  
        IWL_ERR(trans, "Device gone - scheduling removal!\n");
 +      iwl_pcie_dump_csr(trans);
  
        /*
         * get a module reference to avoid doing this
@@@ -2377,6 -2365,32 +2377,6 @@@ static int iwl_trans_pcie_read_config32
                                     ofs, val);
  }
  
 -static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
 -{
 -      int i;
 -
 -      for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
 -              struct iwl_txq *txq = trans->txqs.txq[i];
 -
 -              if (i == trans->txqs.cmd.q_id)
 -                      continue;
 -
 -              spin_lock_bh(&txq->lock);
 -
 -              if (!block && !(WARN_ON_ONCE(!txq->block))) {
 -                      txq->block--;
 -                      if (!txq->block) {
 -                              iwl_write32(trans, HBUS_TARG_WRPTR,
 -                                          txq->write_ptr | (i << 8));
 -                      }
 -              } else if (block) {
 -                      txq->block++;
 -              }
 -
 -              spin_unlock_bh(&txq->lock);
 -      }
 -}
 -
  #define IWL_FLUSH_WAIT_MS     2000
  
  static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
@@@ -3092,7 -3106,7 +3092,7 @@@ static u32 iwl_trans_pcie_dump_rbs(stru
        struct iwl_rxq *rxq = &trans_pcie->rxq[0];
        u32 i, r, j, rb_len = 0;
  
-       spin_lock(&rxq->lock);
+       spin_lock_bh(&rxq->lock);
  
        r = iwl_get_closed_rb_stts(trans, rxq);
  
                *data = iwl_fw_error_next_data(*data);
        }
  
-       spin_unlock(&rxq->lock);
+       spin_unlock_bh(&rxq->lock);
  
        return rb_len;
  }
@@@ -3558,6 -3572,7 +3558,6 @@@ static const struct iwl_trans_ops trans
        .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
  
        .freeze_txq_timer = iwl_trans_txq_freeze_timer,
 -      .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
  #ifdef CONFIG_IWLWIFI_DEBUGFS
        .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
  #endif
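
In the removal-worker hunk above, the rescan/remove lock is now taken before pdev->bus is read, and the worker bails out if another path already removed the device; the device reference is also dropped only after pci_stop_and_remove_bus_device(). The user-space sketch below mirrors that ordering with a pthread mutex standing in for pci_lock_rescan_remove(); all structs are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rescan_remove_lock = PTHREAD_MUTEX_INITIALIZER;

struct bus { int id; };
struct pdev { struct bus *bus; };

static void removal_work(struct pdev *pdev, int rescan)
{
	struct bus *bus;

	pthread_mutex_lock(&rescan_remove_lock);

	bus = pdev->bus;
	if (!bus) {			/* someone else removed it already */
		printf("device already gone\n");
		goto out;
	}

	printf("removing device on bus %d\n", bus->id);
	pdev->bus = NULL;		/* stands in for stop_and_remove + put */

	if (rescan)
		printf("rescanning bus %d\n", bus->id);
out:
	pthread_mutex_unlock(&rescan_remove_lock);
}

int main(void)
{
	struct bus b = { .id = 1 };
	struct pdev d = { .bus = &b };

	removal_work(&d, 1);
	removal_work(&d, 1);		/* second run hits the early-out */
	return 0;
}
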
index 8bbb0e17229d715f5415aac0c2b1a9f1e4e340e0,68ad915203aa54df3daceeff38c5ec04101315a0..00230f106294bef5ef9869b88548762e1941fd95
@@@ -9,11 -9,11 +9,11 @@@
  
  #if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)
  
 -#define Q_READ(_dev, _q, _field) ({                                   \
 +#define Q_READ(_q, _field) ({                                         \
        u32 _offset = offsetof(struct mt76_queue_regs, _field);         \
        u32 _val;                                                       \
        if ((_q)->flags & MT_QFLAG_WED)                                 \
 -              _val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,       \
 +              _val = mtk_wed_device_reg_read((_q)->wed,               \
                                               ((_q)->wed_regs +        \
                                                _offset));              \
        else                                                            \
        _val;                                                           \
  })
  
 -#define Q_WRITE(_dev, _q, _field, _val)       do {                            \
 +#define Q_WRITE(_q, _field, _val)     do {                            \
        u32 _offset = offsetof(struct mt76_queue_regs, _field);         \
        if ((_q)->flags & MT_QFLAG_WED)                                 \
 -              mtk_wed_device_reg_write(&(_dev)->mmio.wed,             \
 +              mtk_wed_device_reg_write((_q)->wed,                     \
                                         ((_q)->wed_regs + _offset),    \
                                         _val);                         \
        else                                                            \
@@@ -33,8 -33,8 +33,8 @@@
  
  #else
  
 -#define Q_READ(_dev, _q, _field)      readl(&(_q)->regs->_field)
 -#define Q_WRITE(_dev, _q, _field, _val)       writel(_val, &(_q)->regs->_field)
 +#define Q_READ(_q, _field)            readl(&(_q)->regs->_field)
 +#define Q_WRITE(_q, _field, _val)     writel(_val, &(_q)->regs->_field)
  
  #endif
  
@@@ -188,67 -188,41 +188,67 @@@ EXPORT_SYMBOL_GPL(mt76_free_pending_rxw
  static void
  mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
  {
 -      Q_WRITE(dev, q, desc_base, q->desc_dma);
 -      Q_WRITE(dev, q, ring_size, q->ndesc);
 -      q->head = Q_READ(dev, q, dma_idx);
 +      Q_WRITE(q, desc_base, q->desc_dma);
 +      if (q->flags & MT_QFLAG_WED_RRO_EN)
 +              Q_WRITE(q, ring_size, MT_DMA_RRO_EN | q->ndesc);
 +      else
 +              Q_WRITE(q, ring_size, q->ndesc);
 +      q->head = Q_READ(q, dma_idx);
        q->tail = q->head;
  }
  
  static void
 -mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 +__mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q,
 +                     bool reset_idx)
  {
 -      int i;
 -
        if (!q || !q->ndesc)
                return;
  
 -      /* clear descriptors */
 -      for (i = 0; i < q->ndesc; i++)
 -              q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 +      if (!mt76_queue_is_wed_rro_ind(q)) {
 +              int i;
 +
 +              /* clear descriptors */
 +              for (i = 0; i < q->ndesc; i++)
 +                      q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 +      }
  
 -      Q_WRITE(dev, q, cpu_idx, 0);
 -      Q_WRITE(dev, q, dma_idx, 0);
 +      if (reset_idx) {
 +              Q_WRITE(q, cpu_idx, 0);
 +              Q_WRITE(q, dma_idx, 0);
 +      }
        mt76_dma_sync_idx(dev, q);
  }
  
 +static void
 +mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
 +{
 +      __mt76_dma_queue_reset(dev, q, true);
 +}
 +
  static int
  mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
                    struct mt76_queue_buf *buf, void *data)
  {
 -      struct mt76_desc *desc = &q->desc[q->head];
        struct mt76_queue_entry *entry = &q->entry[q->head];
        struct mt76_txwi_cache *txwi = NULL;
 -      u32 buf1 = 0, ctrl;
 +      struct mt76_desc *desc;
        int idx = q->head;
 +      u32 buf1 = 0, ctrl;
        int rx_token;
  
 +      if (mt76_queue_is_wed_rro_ind(q)) {
 +              struct mt76_wed_rro_desc *rro_desc;
 +
 +              rro_desc = (struct mt76_wed_rro_desc *)q->desc;
 +              data = &rro_desc[q->head];
 +              goto done;
 +      }
 +
 +      desc = &q->desc[q->head];
        ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 +      buf1 = FIELD_PREP(MT_DMA_CTL_SDP0_H, buf->addr >> 32);
 +#endif
  
        if (mt76_queue_is_wed_rx(q)) {
                txwi = mt76_get_rxwi(dev);
        WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
        WRITE_ONCE(desc->info, 0);
  
 +done:
        entry->dma_addr[0] = buf->addr;
        entry->dma_len[0] = buf->len;
        entry->txwi = txwi;
@@@ -315,18 -288,11 +315,18 @@@ mt76_dma_add_buf(struct mt76_dev *dev, 
                entry->dma_len[0] = buf[0].len;
  
                ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
 +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 +              info |= FIELD_PREP(MT_DMA_CTL_SDP0_H, buf[0].addr >> 32);
 +#endif
                if (i < nbufs - 1) {
                        entry->dma_addr[1] = buf[1].addr;
                        entry->dma_len[1] = buf[1].len;
                        buf1 = buf[1].addr;
                        ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
 +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 +                      info |= FIELD_PREP(MT_DMA_CTL_SDP1_H,
 +                                         buf[1].addr >> 32);
 +#endif
                        if (buf[1].skip_unmap)
                                entry->skip_buf1 = true;
                }
@@@ -377,7 -343,7 +377,7 @@@ static voi
  mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
  {
        wmb();
 -      Q_WRITE(dev, q, cpu_idx, q->head);
 +      Q_WRITE(q, cpu_idx, q->head);
  }
  
  static void
@@@ -393,7 -359,7 +393,7 @@@ mt76_dma_tx_cleanup(struct mt76_dev *de
        if (flush)
                last = -1;
        else
 -              last = Q_READ(dev, q, dma_idx);
 +              last = Q_READ(q, dma_idx);
  
        while (q->queued > 0 && q->tail != last) {
                mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
                }
  
                if (!flush && q->tail == last)
 -                      last = Q_READ(dev, q, dma_idx);
 +                      last = Q_READ(q, dma_idx);
        }
        spin_unlock_bh(&q->cleanup_lock);
  
@@@ -426,26 -392,19 +426,26 @@@ mt76_dma_get_buf(struct mt76_dev *dev, 
  {
        struct mt76_queue_entry *e = &q->entry[idx];
        struct mt76_desc *desc = &q->desc[idx];
 -      void *buf;
 +      u32 ctrl, desc_info, buf1;
 +      void *buf = e->buf;
 +
 +      if (mt76_queue_is_wed_rro_ind(q))
 +              goto done;
  
 +      ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
        if (len) {
 -              u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
                *len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
                *more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
        }
  
 +      desc_info = le32_to_cpu(desc->info);
        if (info)
 -              *info = le32_to_cpu(desc->info);
 +              *info = desc_info;
 +
 +      buf1 = le32_to_cpu(desc->buf1);
 +      mt76_dma_should_drop_buf(drop, ctrl, buf1, desc_info);
  
        if (mt76_queue_is_wed_rx(q)) {
 -              u32 buf1 = le32_to_cpu(desc->buf1);
                u32 token = FIELD_GET(MT_DMA_CTL_TOKEN, buf1);
                struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);
  
                t->ptr = NULL;
  
                mt76_put_rxwi(dev, t);
 -
 -              if (drop) {
 -                      u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
 -
 -                      *drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
 -                                         MT_DMA_CTL_DROP));
 -
 +              if (drop)
                        *drop |= !!(buf1 & MT_DMA_CTL_WO_DROP);
 -              }
        } else {
 -              buf = e->buf;
 -              e->buf = NULL;
                dma_sync_single_for_cpu(dev->dma_dev, e->dma_addr[0],
                                SKB_WITH_OVERHEAD(q->buf_size),
                                page_pool_get_dma_dir(q->page_pool));
        }
  
 +done:
 +      e->buf = NULL;
        return buf;
  }
  
@@@ -484,16 -450,11 +484,16 @@@ mt76_dma_dequeue(struct mt76_dev *dev, 
        if (!q->queued)
                return NULL;
  
 -      if (flush)
 -              q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 -      else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
 +      if (mt76_queue_is_wed_rro_data(q))
                return NULL;
  
 +      if (!mt76_queue_is_wed_rro_ind(q)) {
 +              if (flush)
 +                      q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 +              else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
 +                      return NULL;
 +      }
 +
        q->tail = (q->tail + 1) % q->ndesc;
        q->queued--;
  
@@@ -645,14 -606,11 +645,14 @@@ mt76_dma_rx_fill(struct mt76_dev *dev, 
        spin_lock_bh(&q->lock);
  
        while (q->queued < q->ndesc - 1) {
 +              struct mt76_queue_buf qbuf = {};
                enum dma_data_direction dir;
 -              struct mt76_queue_buf qbuf;
                dma_addr_t addr;
                int offset;
 -              void *buf;
 +              void *buf = NULL;
 +
 +              if (mt76_queue_is_wed_rro_ind(q))
 +                      goto done;
  
                buf = mt76_get_page_pool_buf(q, &offset, q->buf_size);
                if (!buf)
                dma_sync_single_for_device(dev->dma_dev, addr, len, dir);
  
                qbuf.addr = addr + q->buf_offset;
 +done:
                qbuf.len = len - q->buf_offset;
                qbuf.skip_unmap = false;
                if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
                frames++;
        }
  
 -      if (frames)
 +      if (frames || mt76_queue_is_wed_rx(q))
                mt76_dma_kick_queue(dev, q);
  
        spin_unlock_bh(&q->lock);
  int mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q, bool reset)
  {
  #ifdef CONFIG_NET_MEDIATEK_SOC_WED
 -      struct mtk_wed_device *wed = &dev->mmio.wed;
 -      int ret, type, ring;
 -      u8 flags;
 +      int ret = 0, type, ring;
 +      u16 flags;
  
        if (!q || !q->ndesc)
                return -EINVAL;
  
        flags = q->flags;
 -      if (!mtk_wed_device_active(wed))
 +      if (!q->wed || !mtk_wed_device_active(q->wed))
                q->flags &= ~MT_QFLAG_WED;
  
        if (!(q->flags & MT_QFLAG_WED))
  
        switch (type) {
        case MT76_WED_Q_TX:
 -              ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, reset);
 +              ret = mtk_wed_device_tx_ring_setup(q->wed, ring, q->regs,
 +                                                 reset);
                if (!ret)
 -                      q->wed_regs = wed->tx_ring[ring].reg_base;
 +                      q->wed_regs = q->wed->tx_ring[ring].reg_base;
                break;
        case MT76_WED_Q_TXFREE:
                /* WED txfree queue needs ring to be initialized before setup */
                q->flags = 0;
                mt76_dma_queue_reset(dev, q);
                mt76_dma_rx_fill(dev, q, false);
 -              q->flags = flags;
  
 -              ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
 +              ret = mtk_wed_device_txfree_ring_setup(q->wed, q->regs);
                if (!ret)
 -                      q->wed_regs = wed->txfree_ring.reg_base;
 +                      q->wed_regs = q->wed->txfree_ring.reg_base;
                break;
        case MT76_WED_Q_RX:
 -              ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, reset);
 +              ret = mtk_wed_device_rx_ring_setup(q->wed, ring, q->regs,
 +                                                 reset);
                if (!ret)
 -                      q->wed_regs = wed->rx_ring[ring].reg_base;
 +                      q->wed_regs = q->wed->rx_ring[ring].reg_base;
 +              break;
 +      case MT76_WED_RRO_Q_DATA:
 +              q->flags &= ~MT_QFLAG_WED;
 +              __mt76_dma_queue_reset(dev, q, false);
 +              mtk_wed_device_rro_rx_ring_setup(q->wed, ring, q->regs);
 +              q->head = q->ndesc - 1;
 +              q->queued = q->head;
 +              break;
 +      case MT76_WED_RRO_Q_MSDU_PG:
 +              q->flags &= ~MT_QFLAG_WED;
 +              __mt76_dma_queue_reset(dev, q, false);
 +              mtk_wed_device_msdu_pg_rx_ring_setup(q->wed, ring, q->regs);
 +              q->head = q->ndesc - 1;
 +              q->queued = q->head;
 +              break;
 +      case MT76_WED_RRO_Q_IND:
 +              q->flags &= ~MT_QFLAG_WED;
 +              mt76_dma_queue_reset(dev, q);
 +              mt76_dma_rx_fill(dev, q, false);
 +              mtk_wed_device_ind_rx_ring_setup(q->wed, q->regs);
                break;
        default:
                ret = -EINVAL;
 +              break;
        }
 +      q->flags = flags;
  
        return ret;
  #else
@@@ -771,26 -706,11 +771,26 @@@ mt76_dma_alloc_queue(struct mt76_dev *d
        q->buf_size = bufsize;
        q->hw_idx = idx;
  
 -      size = q->ndesc * sizeof(struct mt76_desc);
 -      q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
 +      size = mt76_queue_is_wed_rro_ind(q) ? sizeof(struct mt76_wed_rro_desc)
 +                                          : sizeof(struct mt76_desc);
 +      q->desc = dmam_alloc_coherent(dev->dma_dev, q->ndesc * size,
 +                                    &q->desc_dma, GFP_KERNEL);
        if (!q->desc)
                return -ENOMEM;
  
 +      if (mt76_queue_is_wed_rro_ind(q)) {
 +              struct mt76_wed_rro_desc *rro_desc;
 +              int i;
 +
 +              rro_desc = (struct mt76_wed_rro_desc *)q->desc;
 +              for (i = 0; i < q->ndesc; i++) {
 +                      struct mt76_wed_rro_ind *cmd;
 +
 +                      cmd = (struct mt76_wed_rro_ind *)&rro_desc[i];
 +                      cmd->magic_cnt = MT_DMA_WED_IND_CMD_CNT - 1;
 +              }
 +      }
 +
        size = q->ndesc * sizeof(*q->entry);
        q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
        if (!q->entry)
        if (ret)
                return ret;
  
 -      if (q->flags != MT_WED_Q_TXFREE)
 -              mt76_dma_queue_reset(dev, q);
 +      if (mtk_wed_device_active(&dev->mmio.wed)) {
 +              if ((mtk_wed_get_rx_capa(&dev->mmio.wed) && mt76_queue_is_wed_rro(q)) ||
 +                  mt76_queue_is_wed_tx_free(q))
 +                      return 0;
 +      }
 +
 +      mt76_dma_queue_reset(dev, q);
  
        return 0;
  }
@@@ -832,8 -747,7 +832,8 @@@ mt76_dma_rx_cleanup(struct mt76_dev *de
                if (!buf)
                        break;
  
 -              mt76_put_page_pool_buf(buf, false);
 +              if (!mt76_queue_is_wed_rro(q))
 +                      mt76_put_page_pool_buf(buf, false);
        } while (1);
  
        spin_lock_bh(&q->lock);
@@@ -849,36 -763,27 +849,36 @@@ static voi
  mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
  {
        struct mt76_queue *q = &dev->q_rx[qid];
 -      int i;
  
        if (!q->ndesc)
                return;
  
 -      for (i = 0; i < q->ndesc; i++)
 -              q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 +      if (!mt76_queue_is_wed_rro_ind(q)) {
 +              int i;
 +
 +              for (i = 0; i < q->ndesc; i++)
 +                      q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);
 +      }
  
        mt76_dma_rx_cleanup(dev, q);
  
        /* reset WED rx queues */
        mt76_dma_wed_setup(dev, q, true);
 -      if (q->flags != MT_WED_Q_TXFREE) {
 -              mt76_dma_sync_idx(dev, q);
 -              mt76_dma_rx_fill(dev, q, false);
 -      }
 +
 +      if (mt76_queue_is_wed_tx_free(q))
 +              return;
 +
 +      if (mtk_wed_device_active(&dev->mmio.wed) &&
 +          mt76_queue_is_wed_rro(q))
 +              return;
 +
 +      mt76_dma_sync_idx(dev, q);
 +      mt76_dma_rx_fill(dev, q, false);
  }
  
  static void
  mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
-                 int len, bool more, u32 info)
+                 int len, bool more, u32 info, bool allow_direct)
  {
        struct sk_buff *skb = q->rx_head;
        struct skb_shared_info *shinfo = skb_shinfo(skb);
  
                skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
        } else {
-               mt76_put_page_pool_buf(data, true);
+               mt76_put_page_pool_buf(data, allow_direct);
        }
  
        if (more)
@@@ -910,11 -815,12 +910,12 @@@ mt76_dma_rx_process(struct mt76_dev *de
        struct sk_buff *skb;
        unsigned char *data;
        bool check_ddone = false;
+       bool allow_direct = !mt76_queue_is_wed_rx(q);
        bool more;
  
        if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
 -          q->flags == MT_WED_Q_TXFREE) {
 -              dma_idx = Q_READ(dev, q, dma_idx);
 +          mt76_queue_is_wed_tx_free(q)) {
 +              dma_idx = Q_READ(q, dma_idx);
                check_ddone = true;
        }
  
  
                if (check_ddone) {
                        if (q->tail == dma_idx)
 -                              dma_idx = Q_READ(dev, q, dma_idx);
 +                              dma_idx = Q_READ(q, dma_idx);
  
                        if (q->tail == dma_idx)
                                break;
                }
  
                if (q->rx_head) {
-                       mt76_add_fragment(dev, q, data, len, more, info);
+                       mt76_add_fragment(dev, q, data, len, more, info,
+                                         allow_direct);
                        continue;
                }
  
                continue;
  
  free_frag:
-               mt76_put_page_pool_buf(data, true);
+               mt76_put_page_pool_buf(data, allow_direct);
        }
  
        mt76_dma_rx_fill(dev, q, true);
@@@ -1052,20 -959,6 +1054,20 @@@ void mt76_dma_attach(struct mt76_dev *d
  }
  EXPORT_SYMBOL_GPL(mt76_dma_attach);
  
 +void mt76_dma_wed_reset(struct mt76_dev *dev)
 +{
 +      struct mt76_mmio *mmio = &dev->mmio;
 +
 +      if (!test_bit(MT76_STATE_WED_RESET, &dev->phy.state))
 +              return;
 +
 +      complete(&mmio->wed_reset);
 +
 +      if (!wait_for_completion_timeout(&mmio->wed_reset_complete, 3 * HZ))
 +              dev_err(dev->dev, "wed reset complete timeout\n");
 +}
 +EXPORT_SYMBOL_GPL(mt76_dma_wed_reset);
 +
  void mt76_dma_cleanup(struct mt76_dev *dev)
  {
        int i;
        mt76_for_each_q_rx(dev, i) {
                struct mt76_queue *q = &dev->q_rx[i];
  
 +              if (mtk_wed_device_active(&dev->mmio.wed) &&
 +                  mt76_queue_is_wed_rro(q))
 +                      continue;
 +
                netif_napi_del(&dev->napi[i]);
                mt76_dma_rx_cleanup(dev, q);
  
                page_pool_destroy(q->page_pool);
        }
  
 -      mt76_free_pending_txwi(dev);
 -      mt76_free_pending_rxwi(dev);
 -
        if (mtk_wed_device_active(&dev->mmio.wed))
                mtk_wed_device_detach(&dev->mmio.wed);
 +
 +      if (mtk_wed_device_active(&dev->mmio.wed_hif2))
 +              mtk_wed_device_detach(&dev->mmio.wed_hif2);
 +
 +      mt76_free_pending_txwi(dev);
 +      mt76_free_pending_rxwi(dev);
  }
  EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
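
A recurring change in this file is that Q_READ()/Q_WRITE() now resolve the WED device through a pointer stored in the queue itself instead of dev->mmio.wed, so queues can be served by different WED instances (including the second hif2 device detached above). A reduced macro model with plain stand-in structs:

#include <stdio.h>

struct wed_dev { int id; };

struct queue {
	unsigned int flags;
	struct wed_dev *wed;	/* per-queue backend, may differ per queue */
	unsigned int cpu_idx;
};

#define QFLAG_WED 0x1

#define Q_WRITE(_q, _field, _val) do {				\
	if ((_q)->flags & QFLAG_WED)				\
		printf("WED%d write " #_field " = %u\n",	\
		       (_q)->wed->id, (unsigned int)(_val));	\
	else							\
		(_q)->_field = (_val);				\
} while (0)

int main(void)
{
	struct wed_dev wed0 = { .id = 0 }, wed1 = { .id = 1 };
	struct queue qa = { .flags = QFLAG_WED, .wed = &wed0 };
	struct queue qb = { .flags = QFLAG_WED, .wed = &wed1 };

	Q_WRITE(&qa, cpu_idx, 5);
	Q_WRITE(&qb, cpu_idx, 7);
	return 0;
}
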
index 8ad008591e3209edc3ab28ae27f34ea54e688bbc,2b5e500bf0930f546403edd8dab60a4a76f43e0b..52808dd225132ad6c28e6acf317dbe3f0a17399e
  #define IEEE80211_SN_MODULO           (IEEE80211_MAX_SN + 1)
  
  
 -/* PV1 Layout 11ah 9.8.3.1 */
 +/* PV1 Layout IEEE 802.11-2020 9.8.3.1 */
  #define IEEE80211_PV1_FCTL_VERS               0x0003
  #define IEEE80211_PV1_FCTL_FTYPE      0x001c
  #define IEEE80211_PV1_FCTL_STYPE      0x00e0
 -#define IEEE80211_PV1_FCTL_TODS               0x0100
 +#define IEEE80211_PV1_FCTL_FROMDS             0x0100
  #define IEEE80211_PV1_FCTL_MOREFRAGS  0x0200
  #define IEEE80211_PV1_FCTL_PM         0x0400
  #define IEEE80211_PV1_FCTL_MOREDATA   0x0800
@@@ -4447,7 -4447,8 +4447,8 @@@ ieee80211_is_protected_dual_of_public_a
                action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
                action != WLAN_PUB_ACTION_FTM_REQUEST &&
                action != WLAN_PUB_ACTION_FTM_RESPONSE &&
-               action != WLAN_PUB_ACTION_FILS_DISCOVERY;
+               action != WLAN_PUB_ACTION_FILS_DISCOVERY &&
+               action != WLAN_PUB_ACTION_VENDOR_SPECIFIC;
  }
  
  /**
diff --combined include/net/sock.h
index 8b6fe164b218dc90fa5f2b3c7a36ba7d6b226866,0201136b0b9cafb4ff7f180efbf0360de2389c9d..ba6642811db4e94233f0fa39d3c9e81e51369687
@@@ -2799,6 -2799,11 +2799,11 @@@ static inline bool sk_is_tcp(const stru
        return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
  }
  
+ static inline bool sk_is_stream_unix(const struct sock *sk)
+ {
+       return sk->sk_family == AF_UNIX && sk->sk_type == SOCK_STREAM;
+ }
  /**
   * sk_eat_skb - Release a skb if it is no longer needed
   * @sk: socket to eat this skb from
@@@ -2920,6 -2925,7 +2925,6 @@@ extern __u32 sysctl_wmem_max
  extern __u32 sysctl_rmem_max;
  
  extern int sysctl_tstamp_allow_data;
 -extern int sysctl_optmem_max;
  
  extern __u32 sysctl_wmem_default;
  extern __u32 sysctl_rmem_default;
diff --combined net/core/dev.c
index b875040783209e95afb92217a0a07ede42a2e425,ad20bebe153fc7d16a4e3ce8ebeabf075acacb4a..f9d4b550ef4bf09faec37f26afc1217b3d10fcbc
@@@ -165,6 -165,7 +165,6 @@@ static int netif_rx_internal(struct sk_
  static int call_netdevice_notifiers_extack(unsigned long val,
                                           struct net_device *dev,
                                           struct netlink_ext_ack *extack);
 -static struct napi_struct *napi_by_id(unsigned int napi_id);
  
  /*
   * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@@ -3471,6 -3472,9 +3471,9 @@@ static netdev_features_t gso_features_c
        if (gso_segs > READ_ONCE(dev->gso_max_segs))
                return features & ~NETIF_F_GSO_MASK;
  
+       if (unlikely(skb->len >= READ_ONCE(dev->gso_max_size)))
+               return features & ~NETIF_F_GSO_MASK;
        if (!skb_shinfo(skb)->gso_type) {
                skb_warn_bad_offload(skb);
                return features & ~NETIF_F_GSO_MASK;
@@@ -3753,8 -3757,6 +3756,8 @@@ static inline int __dev_xmit_skb(struc
  
        qdisc_calculate_pkt_len(skb, q);
  
 +      tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_DROP);
 +
        if (q->flags & TCQ_F_NOLOCK) {
                if (q->flags & TCQ_F_CAN_BYPASS && nolock_qdisc_is_empty(q) &&
                    qdisc_run_begin(q)) {
  no_lock_out:
                if (unlikely(to_free))
                        kfree_skb_list_reason(to_free,
 -                                            SKB_DROP_REASON_QDISC_DROP);
 +                                            tcf_get_drop_reason(to_free));
                return rc;
        }
  
        }
        spin_unlock(root_lock);
        if (unlikely(to_free))
 -              kfree_skb_list_reason(to_free, SKB_DROP_REASON_QDISC_DROP);
 +              kfree_skb_list_reason(to_free,
 +                                    tcf_get_drop_reason(to_free));
        if (unlikely(contended))
                spin_unlock(&q->busylock);
        return rc;
@@@ -3926,14 -3927,14 +3929,14 @@@ static int tc_run(struct tcx_entry *ent
  
        tc_skb_cb(skb)->mru = 0;
        tc_skb_cb(skb)->post_ct = false;
 -      res.drop_reason = *drop_reason;
 +      tcf_set_drop_reason(skb, *drop_reason);
  
        mini_qdisc_bstats_cpu_update(miniq, skb);
        ret = tcf_classify(skb, miniq->block, miniq->filter_list, &res, false);
        /* Only tcf related quirks below. */
        switch (ret) {
        case TC_ACT_SHOT:
 -              *drop_reason = res.drop_reason;
 +              *drop_reason = tcf_get_drop_reason(skb);
                mini_qdisc_qstats_cpu_drop(miniq);
                break;
        case TC_ACT_OK:
@@@ -6141,7 -6142,7 +6144,7 @@@ bool napi_complete_done(struct napi_str
  EXPORT_SYMBOL(napi_complete_done);
  
  /* must be called under rcu_read_lock(), as we dont take a reference */
 -static struct napi_struct *napi_by_id(unsigned int napi_id)
 +struct napi_struct *napi_by_id(unsigned int napi_id)
  {
        unsigned int hash = napi_id % HASH_SIZE(napi_hash);
        struct napi_struct *napi;
@@@ -6402,43 -6403,6 +6405,43 @@@ int dev_set_threaded(struct net_device 
  }
  EXPORT_SYMBOL(dev_set_threaded);
  
 +/**
 + * netif_queue_set_napi - Associate queue with the napi
 + * @dev: device to which NAPI and queue belong
 + * @queue_index: Index of queue
 + * @type: queue type as RX or TX
 + * @napi: NAPI context, pass NULL to clear previously set NAPI
 + *
 + * Set queue with its corresponding napi context. This should be done after
 + * registering the NAPI handler for the queue-vector and the queues have been
 + * mapped to the corresponding interrupt vector.
 + */
 +void netif_queue_set_napi(struct net_device *dev, unsigned int queue_index,
 +                        enum netdev_queue_type type, struct napi_struct *napi)
 +{
 +      struct netdev_rx_queue *rxq;
 +      struct netdev_queue *txq;
 +
 +      if (WARN_ON_ONCE(napi && !napi->dev))
 +              return;
 +      if (dev->reg_state >= NETREG_REGISTERED)
 +              ASSERT_RTNL();
 +
 +      switch (type) {
 +      case NETDEV_QUEUE_TYPE_RX:
 +              rxq = __netif_get_rx_queue(dev, queue_index);
 +              rxq->napi = napi;
 +              return;
 +      case NETDEV_QUEUE_TYPE_TX:
 +              txq = netdev_get_tx_queue(dev, queue_index);
 +              txq->napi = napi;
 +              return;
 +      default:
 +              return;
 +      }
 +}
 +EXPORT_SYMBOL(netif_queue_set_napi);
 +
  void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
                           int (*poll)(struct napi_struct *, int), int weight)
  {
         */
        if (dev->threaded && napi_kthread_create(napi))
                dev->threaded = 0;
 +      netif_napi_set_irq(napi, -1);
  }
  EXPORT_SYMBOL(netif_napi_add_weight);
  
@@@ -10551,7 -10514,7 +10554,7 @@@ void netdev_run_todo(void
                write_lock(&dev_base_lock);
                dev->reg_state = NETREG_UNREGISTERED;
                write_unlock(&dev_base_lock);
 -              linkwatch_forget_dev(dev);
 +              linkwatch_sync_dev(dev);
        }
  
        while (!list_empty(&list)) {
@@@ -11276,19 -11239,17 +11279,19 @@@ int __dev_change_net_namespace(struct n
        dev_net_set(dev, net);
        dev->ifindex = new_ifindex;
  
 -      /* Send a netdev-add uevent to the new namespace */
 -      kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
 -      netdev_adjacent_add_links(dev);
 -
        if (new_name[0]) /* Rename the netdev to prepared name */
                strscpy(dev->name, new_name, IFNAMSIZ);
  
        /* Fixup kobjects */
 +      dev_set_uevent_suppress(&dev->dev, 1);
        err = device_rename(&dev->dev, dev->name);
 +      dev_set_uevent_suppress(&dev->dev, 0);
        WARN_ON(err);
  
 +      /* Send a netdev-add uevent to the new namespace */
 +      kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
 +      netdev_adjacent_add_links(dev);
 +
        /* Adapt owner in case owning user namespace of target network
         * namespace is different from the original one.
         */
@@@ -11612,60 -11573,6 +11615,60 @@@ static struct pernet_operations __net_i
        .exit_batch = default_device_exit_batch,
  };
  
 +static void __init net_dev_struct_check(void)
 +{
 +      /* TX read-mostly hotpath */
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
 +#ifdef CONFIG_XPS
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
 +#endif
 +#ifdef CONFIG_NETFILTER_EGRESS
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
 +#endif
 +#ifdef CONFIG_NET_XGRESS
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
 +#endif
 +      CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 152);
 +
 +      /* TXRX read-mostly hotpath */
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
 +      CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 30);
 +
 +      /* RX read-mostly hotpath */
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_flush_timeout);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, napi_defer_hard_irqs);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
 +#ifdef CONFIG_NETPOLL
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
 +#endif
 +#ifdef CONFIG_NET_XGRESS
 +      CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
 +#endif
 +      CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 96);
 +}
 +
  /*
   *    Initialize the DEV module. At boot time this walks the device list and
   *    unhooks any devices that fail to initialise (normally hardware not
@@@ -11683,8 -11590,6 +11686,8 @@@ static int __init net_dev_init(void
  
        BUG_ON(!dev_boot_phase);
  
 +      net_dev_struct_check();
 +
        if (dev_proc_init())
                goto out;
  
diff --combined net/core/skbuff.c
index 4d4b11b0a83deb5ba0a064a9b7bafab3f82f4666,94cc40a6f797561b219a47c7ae7ee58c21692cd5..ce5687ddb7684ae315651beab82a8c84e45885f2
@@@ -890,11 -890,6 +890,11 @@@ static void skb_clone_fraglist(struct s
                skb_get(list);
  }
  
 +static bool is_pp_page(struct page *page)
 +{
 +      return (page->pp_magic & ~0x3UL) == PP_SIGNATURE;
 +}
 +
  #if IS_ENABLED(CONFIG_PAGE_POOL)
  bool napi_pp_put_page(struct page *page, bool napi_safe)
  {
         * and page_is_pfmemalloc() is checked in __page_pool_put_page()
         * to avoid recycling the pfmemalloc page.
         */
 -      if (unlikely((page->pp_magic & ~0x3UL) != PP_SIGNATURE))
 +      if (unlikely(!is_pp_page(page)))
                return false;
  
        pp = page->pp;
@@@ -947,37 -942,6 +947,37 @@@ static bool skb_pp_recycle(struct sk_bu
        return napi_pp_put_page(virt_to_page(data), napi_safe);
  }
  
 +/**
 + * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
 + * @skb:      page pool aware skb
 + *
 + * Increase the fragment reference count (pp_ref_count) of a skb. This is
 + * intended to gain fragment references only for page pool aware skbs,
 + * i.e. when skb->pp_recycle is true, and not for fragments in a
 + * non-pp-recycling skb. It has a fallback to increase references on normal
 + * pages, as page pool aware skbs may also have normal page fragments.
 + */
 +static int skb_pp_frag_ref(struct sk_buff *skb)
 +{
 +      struct skb_shared_info *shinfo;
 +      struct page *head_page;
 +      int i;
 +
 +      if (!skb->pp_recycle)
 +              return -EINVAL;
 +
 +      shinfo = skb_shinfo(skb);
 +
 +      for (i = 0; i < shinfo->nr_frags; i++) {
 +              head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
 +              if (likely(is_pp_page(head_page)))
 +                      page_pool_ref_page(head_page);
 +              else
 +                      page_ref_inc(head_page);
 +      }
 +      return 0;
 +}
 +
  static void skb_kfree_head(void *head, unsigned int end_offset)
  {
        if (end_offset == SKB_SMALL_HEAD_HEADROOM)
@@@ -4861,7 -4825,9 +4861,9 @@@ static __always_inline unsigned int skb
  static void skb_extensions_init(void)
  {
        BUILD_BUG_ON(SKB_EXT_NUM >= 8);
+ #if !IS_ENABLED(CONFIG_KCOV_INSTRUMENT_ALL)
        BUILD_BUG_ON(skb_ext_total_length() > 255);
+ #endif
  
        skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
                                             SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
@@@ -5801,12 -5767,17 +5803,12 @@@ bool skb_try_coalesce(struct sk_buff *t
                return false;
  
        /* In general, avoid mixing page_pool and non-page_pool allocated
 -       * pages within the same SKB. Additionally avoid dealing with clones
 -       * with page_pool pages, in case the SKB is using page_pool fragment
 -       * references (page_pool_alloc_frag()). Since we only take full page
 -       * references for cloned SKBs at the moment that would result in
 -       * inconsistent reference counts.
 -       * In theory we could take full references if @from is cloned and
 -       * !@to->pp_recycle but its tricky (due to potential race with
 -       * the clone disappearing) and rare, so not worth dealing with.
 +       * pages within the same SKB. In theory we could take full
 +       * references if @from is cloned and !@to->pp_recycle but it's
 +       * tricky (due to potential race with the clone disappearing) and
 +       * rare, so not worth dealing with.
         */
 -      if (to->pp_recycle != from->pp_recycle ||
 -          (from->pp_recycle && skb_cloned(from)))
 +      if (to->pp_recycle != from->pp_recycle)
                return false;
  
        if (len <= skb_tailroom(to)) {
        /* if the skb is not cloned this does nothing
         * since we set nr_frags to 0.
         */
 -      for (i = 0; i < from_shinfo->nr_frags; i++)
 -              __skb_frag_ref(&from_shinfo->frags[i]);
 +      if (skb_pp_frag_ref(from)) {
 +              for (i = 0; i < from_shinfo->nr_frags; i++)
 +                      __skb_frag_ref(&from_shinfo->frags[i]);
 +      }
  
        to->truesize += delta;
        to->len += len;
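The is_pp_page()/skb_pp_frag_ref() pair above lets skb_try_coalesce() keep coalescing page-pool aware skbs: each fragment page is tested against the page-pool signature and gets either a pp_ref_count fragment reference or a plain page reference, with the __skb_frag_ref() loop kept only as the fallback when skb_pp_frag_ref() declines (i.e. @from is not a pp_recycle skb). A minimal userspace mock of that dispatch is sketched below; the demo_* names and the signature value are invented and are not kernel APIs.

/* Sketch only: signature check, then take the matching kind of reference. */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PP_SIGNATURE 0x40UL	/* stand-in for PP_SIGNATURE */

struct demo_page {
	unsigned long pp_magic;		/* low bits may carry flags, hence the mask */
	unsigned int  pp_ref_count;	/* page-pool fragment references */
	unsigned int  refcount;		/* plain page references */
};

static bool demo_is_pp_page(const struct demo_page *p)
{
	return (p->pp_magic & ~0x3UL) == DEMO_PP_SIGNATURE;
}

/* Take one more reference on every fragment page of a pp-aware skb:
 * pp pages get a fragment ref, interleaved normal pages a plain ref. */
static void demo_frag_ref(struct demo_page **frags, int nr_frags)
{
	for (int i = 0; i < nr_frags; i++) {
		if (demo_is_pp_page(frags[i]))
			frags[i]->pp_ref_count++;
		else
			frags[i]->refcount++;
	}
}

int main(void)
{
	struct demo_page pp  = { .pp_magic = DEMO_PP_SIGNATURE, .pp_ref_count = 1, .refcount = 1 };
	struct demo_page reg = { .pp_magic = 0, .pp_ref_count = 0, .refcount = 1 };
	struct demo_page *frags[] = { &pp, &reg };

	demo_frag_ref(frags, 2);
	printf("pp page: pp_ref_count=%u, normal page: refcount=%u\n",
	       pp.pp_ref_count, reg.refcount);
	return 0;
}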
diff --combined net/mac80211/cfg.c
index e0a4f9eecb2cb4373d3485425712bf3cbe5565a3,eb1d3ef843538c2966e02bcbd6f458e671e538ef..489dd97f51724a86053a9c4e9269487c4c7e928b
@@@ -1270,7 -1270,7 +1270,7 @@@ static int ieee80211_start_ap(struct wi
                return -EALREADY;
  
        if (params->smps_mode != NL80211_SMPS_OFF)
 -              return -ENOTSUPP;
 +              return -EOPNOTSUPP;
  
        link->smps_mode = IEEE80211_SMPS_OFF;
  
@@@ -1788,10 -1788,10 +1788,10 @@@ static int sta_link_apply_parameters(st
                                          lockdep_is_held(&local->hw.wiphy->mtx));
  
        /*
-        * If there are no changes, then accept a link that doesn't exist,
+        * If there are no changes, then accept a link that exists,
         * unless it's a new link.
         */
-       if (params->link_id < 0 && !new_link &&
+       if (params->link_id >= 0 && !new_link &&
            !params->link_mac && !params->txpwr_set &&
            !params->supported_rates_len &&
            !params->ht_capa && !params->vht_capa &&
@@@ -2556,7 -2556,7 +2556,7 @@@ static int ieee80211_update_mesh_config
                 * devices that report signal in dBm.
                 */
                if (!ieee80211_hw_check(&sdata->local->hw, SIGNAL_DBM))
 -                      return -ENOTSUPP;
 +                      return -EOPNOTSUPP;
                conf->rssi_threshold = nconf->rssi_threshold;
        }
        if (_chg_mesh_attr(NL80211_MESHCONF_HT_OPMODE, mask)) {
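Both -ENOTSUPP replacements above follow the usual rule that ENOTSUPP is a kernel-internal errno (524) with no userspace definition, so returning it to callers yields an unrecognised error code, while -EOPNOTSUPP maps to a standard message. The small userspace check below illustrates the difference; KERNEL_ENOTSUPP is defined here only as a stand-in because userspace headers do not provide ENOTSUPP.

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define KERNEL_ENOTSUPP 524	/* kernel-internal value, shown for comparison only */

int main(void)
{
	printf("EOPNOTSUPP: %s\n", strerror(EOPNOTSUPP));	/* "Operation not supported" */
	printf("ENOTSUPP:   %s\n", strerror(KERNEL_ENOTSUPP));	/* "Unknown error 524" on glibc */
	return 0;
}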
diff --combined net/mac80211/mlme.c
index a693ca2cf8cdc9e7ea7cd223e2969fe93611cf13,c8998cf01b7a54e565aa7d950cfb7affc20be7e5..40a4fbfff5304dc4ee6ab60a00fc74071dc59ffd
@@@ -135,7 -135,6 +135,7 @@@ ieee80211_handle_puncturing_bitmap(stru
                                   u16 bitmap, u64 *changed)
  {
        struct cfg80211_chan_def *chandef = &link->conf->chandef;
 +      struct ieee80211_local *local = link->sdata->local;
        u16 extracted;
        u64 _changed = 0;
  
                                                         bitmap);
  
                if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
 -                                                           chandef))
 +                                                           chandef) &&
 +                  !(bitmap && ieee80211_hw_check(&local->hw,
 +                                                 DISALLOW_PUNCTURING)))
                        break;
                link->u.mgd.conn_flags |=
                        ieee80211_chandef_downgrade(chandef);
@@@ -1385,7 -1382,7 +1385,7 @@@ static int ieee80211_send_assoc(struct 
        struct ieee80211_mgmt *mgmt;
        u8 *pos, qos_info, *ie_start;
        size_t offset, noffset;
 -      u16 capab = WLAN_CAPABILITY_ESS, link_capab;
 +      u16 capab = 0, link_capab;
        __le16 listen_int;
        struct element *ext_capa = NULL;
        enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
        *pos++ = assoc_data->ssid_len;
        memcpy(pos, assoc_data->ssid, assoc_data->ssid_len);
  
 +      /*
 +       * This bit is technically reserved, so it shouldn't matter for either
 +       * the AP or us, but it also means we shouldn't set it. However, we've
 +       * always set it in the past, and apparently some EHT APs check that
 +       * we don't set it. To avoid interoperability issues with old APs that
 +       * for some reason check it and want it to be set, set the bit for all
 +       * pre-EHT connections as we used to do.
 +       */
 +      if (link->u.mgd.conn_flags & IEEE80211_CONN_DISABLE_EHT)
 +              capab |= WLAN_CAPABILITY_ESS;
 +
        /* add the elements for the assoc (main) link */
        link_capab = capab;
        offset = ieee80211_assoc_link_elems(sdata, skb, &link_capab,
@@@ -5696,7 -5682,6 +5696,7 @@@ static bool ieee80211_config_puncturing
                                        const struct ieee80211_eht_operation *eht_oper,
                                        u64 *changed)
  {
 +      struct ieee80211_local *local = link->sdata->local;
        u16 bitmap = 0, extracted;
  
        if ((eht_oper->params & IEEE80211_EHT_OPER_INFO_PRESENT) &&
                return false;
        }
  
 +      if (bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING))
 +              return false;
 +
        ieee80211_handle_puncturing_bitmap(link, eht_oper, bitmap, changed);
        return true;
  }
@@@ -5800,7 -5782,7 +5800,7 @@@ static void ieee80211_ml_reconfiguratio
  {
        const struct ieee80211_multi_link_elem *ml;
        const struct element *sub;
-       size_t ml_len;
+       ssize_t ml_len;
        unsigned long removed_links = 0;
        u16 link_removal_timeout[IEEE80211_MLD_MAX_NUM_LINKS] = {};
        u8 link_id;
                                             elems->scratch + elems->scratch_len -
                                             elems->scratch_pos,
                                             WLAN_EID_FRAGMENT);
+       if (ml_len < 0)
+               return;
  
        elems->ml_reconf = (const void *)elems->scratch_pos;
        elems->ml_reconf_len = ml_len;
@@@ -7602,8 -7586,7 +7604,8 @@@ ieee80211_setup_assoc_link(struct ieee8
  
                        bitmap = get_unaligned_le16(disable_subchannel_bitmap);
                        if (cfg80211_valid_disable_subchannel_bitmap(&bitmap,
 -                                                                   &link->conf->chandef))
 +                                                                   &link->conf->chandef) &&
 +                          !(bitmap && ieee80211_hw_check(&local->hw, DISALLOW_PUNCTURING)))
                                ieee80211_handle_puncturing_bitmap(link,
                                                                   eht_oper,
                                                                   bitmap,
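The ml_len change above (size_t to ssize_t plus an explicit negative check) guards against the defragmentation helper reporting an error as a negative length: stored in an unsigned variable, that error would silently become an enormous element length. The short sketch below illustrates the pitfall; demo_parse_len() is an invented stand-in, not the mac80211 helper.

#include <stdio.h>
#include <sys/types.h>

/* Models a parser that returns a payload length, or a negative errno-style
 * value when the element cannot be found. */
static ssize_t demo_parse_len(int fail)
{
	return fail ? -1 : 42;
}

int main(void)
{
	size_t  bad  = demo_parse_len(1);	/* -1 wraps to SIZE_MAX and looks like a valid length */
	ssize_t good = demo_parse_len(1);

	printf("unsigned result: %zu\n", bad);
	if (good < 0)				/* the added guard: bail out before using the length */
		printf("signed result: error detected, parsing skipped\n");
	return 0;
}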
diff --combined net/mptcp/protocol.h
index 458a2d7bb0ddc1f64433d519058cfd29afb03606,aa1a93fe40ffaf3e77752802d8437b0d2780d2fb..1240268f9e9e14fc1bf4526c87f27fb48ab511f3
  #define MPTCP_ERROR_REPORT    3
  #define MPTCP_RETRANSMIT      4
  #define MPTCP_FLUSH_JOIN_LIST 5
- #define MPTCP_CONNECTED               6
+ #define MPTCP_SYNC_STATE      6
  #define MPTCP_SYNC_SNDBUF     7
  
  struct mptcp_skb_cb {
@@@ -296,6 -296,9 +296,9 @@@ struct mptcp_sock 
        bool            use_64bit_ack; /* Set when we received a 64-bit DSN */
        bool            csum_enabled;
        bool            allow_infinite_fallback;
+       u8              pending_state; /* A subflow asked to set this sk_state,
+                                       * protected by the msk data lock
+                                       */
        u8              mpc_endpoint_id;
        u8              recvmsg_inq:1,
                        cork:1,
@@@ -728,7 -731,7 +731,7 @@@ void mptcp_get_options(const struct sk_
                       struct mptcp_options_received *mp_opt);
  
  void mptcp_finish_connect(struct sock *sk);
- void __mptcp_set_connected(struct sock *sk);
+ void __mptcp_sync_state(struct sock *sk, int state);
  void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout);
  
  static inline void mptcp_stop_tout_timer(struct sock *sk)
@@@ -1072,15 -1075,6 +1075,15 @@@ static inline void __mptcp_do_fallback(
        set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
  }
  
 +static inline bool __mptcp_has_initial_subflow(const struct mptcp_sock *msk)
 +{
 +      struct sock *ssk = READ_ONCE(msk->first);
 +
 +      return ssk && ((1 << inet_sk_state_load(ssk)) &
 +                     (TCPF_ESTABLISHED | TCPF_SYN_SENT |
 +                      TCPF_SYN_RECV | TCPF_LISTEN));
 +}
 +
  static inline void mptcp_do_fallback(struct sock *ssk)
  {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
@@@ -1124,7 -1118,7 +1127,7 @@@ static inline bool subflow_simultaneous
  {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
  
-       return sk->sk_state == TCP_ESTABLISHED &&
+       return (1 << sk->sk_state) & (TCPF_ESTABLISHED | TCPF_FIN_WAIT1) &&
               is_active_ssk(subflow) &&
               !subflow->conn_finished;
  }
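The subflow_simultaneous_connect() change above widens a single-state equality test into a TCPF_* bitmask test, the same "(1 << state) & mask" idiom used by the new __mptcp_has_initial_subflow() helper. The self-contained sketch below shows the idiom with invented state names and values; it does not reuse the kernel's TCP state numbering.

#include <stdbool.h>
#include <stdio.h>

enum demo_state { D_ESTABLISHED = 1, D_SYN_SENT, D_SYN_RECV, D_FIN_WAIT1, D_CLOSE };

#define DF(s)	(1U << (s))		/* mirrors the TCPF_* "flag of state" pattern */

static bool demo_simultaneous_connect(enum demo_state s)
{
	/* true for ESTABLISHED or FIN_WAIT1, mirroring the widened check */
	return DF(s) & (DF(D_ESTABLISHED) | DF(D_FIN_WAIT1));
}

int main(void)
{
	printf("ESTABLISHED: %d\n", demo_simultaneous_connect(D_ESTABLISHED));	/* 1 */
	printf("FIN_WAIT1:   %d\n", demo_simultaneous_connect(D_FIN_WAIT1));	/* 1 */
	printf("CLOSE:       %d\n", demo_simultaneous_connect(D_CLOSE));	/* 0 */
	return 0;
}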
diff --combined tools/testing/selftests/Makefile
index f0c854d6511c6322315bc8661e96b4dfc79d1c01,8247a7c69c36d7d446083d422ada0c5c6d897625..81f094e7f0f79a98bc1e237ca307f5ac10d238bd
@@@ -58,7 -58,6 +58,7 @@@ TARGETS += net/forwardin
  TARGETS += net/hsr
  TARGETS += net/mptcp
  TARGETS += net/openvswitch
 +TARGETS += net/tcp_ao
  TARGETS += netfilter
  TARGETS += nsfs
  TARGETS += perf_events
@@@ -156,12 -155,10 +156,10 @@@ ifneq ($(KBUILD_OUTPUT),
    abs_objtree := $(realpath $(abs_objtree))
    BUILD := $(abs_objtree)/kselftest
    KHDR_INCLUDES := -isystem ${abs_objtree}/usr/include
-   KHDR_DIR := ${abs_objtree}/usr/include
  else
    BUILD := $(CURDIR)
    abs_srctree := $(shell cd $(top_srcdir) && pwd)
    KHDR_INCLUDES := -isystem ${abs_srctree}/usr/include
-   KHDR_DIR := ${abs_srctree}/usr/include
    DEFAULT_INSTALL_HDR_PATH := 1
  endif
  
@@@ -175,7 -172,7 +173,7 @@@ export KHDR_INCLUDE
  # all isn't the first target in the file.
  .DEFAULT_GOAL := all
  
- all: kernel_header_files
+ all:
        @ret=1;                                                 \
        for TARGET in $(TARGETS); do                            \
                BUILD_TARGET=$$BUILD/$$TARGET;                  \
                                $(if $(FORCE_TARGETS),|| exit); \
                ret=$$((ret * $$?));                            \
        done; exit $$ret;
- kernel_header_files:
-       @ls $(KHDR_DIR)/linux/*.h >/dev/null 2>/dev/null;                          \
-       if [ $$? -ne 0 ]; then                                                     \
-             RED='\033[1;31m';                                                  \
-             NOCOLOR='\033[0m';                                                 \
-             echo;                                                              \
-             echo -e "$${RED}error$${NOCOLOR}: missing kernel header files.";   \
-             echo "Please run this and try again:";                             \
-             echo;                                                              \
-             echo "    cd $(top_srcdir)";                                       \
-             echo "    make headers";                                           \
-             echo;                                                              \
-           exit 1;                                                                \
-       fi
- .PHONY: kernel_header_files
  
  run_tests: all
        @for TARGET in $(TARGETS); do \
diff --combined tools/testing/selftests/bpf/config.x86_64
index f7bfb2b09c82b6eb82d8b1f115f8e99440e6da93,49a29dbc19107229c52b1b2bdd055854295fca4f..b946088017f1d40c3367af5c24668f9a8a6c075e
@@@ -1,3 -1,6 +1,3 @@@
 -CONFIG_9P_FS=y
 -CONFIG_9P_FS_POSIX_ACL=y
 -CONFIG_9P_FS_SECURITY=y
  CONFIG_AGP=y
  CONFIG_AGP_AMD64=y
  CONFIG_AGP_INTEL=y
@@@ -42,11 -45,11 +42,10 @@@ CONFIG_CPU_IDLE_GOV_LADDER=
  CONFIG_CPUSETS=y
  CONFIG_CRC_T10DIF=y
  CONFIG_CRYPTO_BLAKE2B=y
 -CONFIG_CRYPTO_DEV_VIRTIO=y
  CONFIG_CRYPTO_SEQIV=y
  CONFIG_CRYPTO_XXHASH=y
  CONFIG_DCB=y
  CONFIG_DEBUG_ATOMIC_SLEEP=y
- CONFIG_DEBUG_CREDENTIALS=y
  CONFIG_DEBUG_INFO_BTF=y
  CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
  CONFIG_DEBUG_MEMORY_INIT=y
@@@ -141,6 -144,8 +140,6 @@@ CONFIG_MEMORY_FAILURE=
  CONFIG_MINIX_SUBPARTITION=y
  CONFIG_NAMESPACES=y
  CONFIG_NET=y
 -CONFIG_NET_9P=y
 -CONFIG_NET_9P_VIRTIO=y
  CONFIG_NET_ACT_BPF=y
  CONFIG_NET_CLS_CGROUP=y
  CONFIG_NET_EMATCH=y
@@@ -222,6 -227,12 +221,6 @@@ CONFIG_USER_NS=
  CONFIG_VALIDATE_FS_PARSER=y
  CONFIG_VETH=y
  CONFIG_VIRT_DRIVERS=y
 -CONFIG_VIRTIO_BALLOON=y
 -CONFIG_VIRTIO_BLK=y
 -CONFIG_VIRTIO_CONSOLE=y
 -CONFIG_VIRTIO_NET=y
 -CONFIG_VIRTIO_PCI=y
 -CONFIG_VIRTIO_VSOCKETS_COMMON=y
  CONFIG_VLAN_8021Q=y
  CONFIG_VSOCKETS=y
  CONFIG_VSOCKETS_LOOPBACK=y
diff --combined tools/testing/selftests/net/Makefile
index 14bd68da746693f4b11e073420231e885c5894f3,9e5bf59a20bff4569ec0cfd4fc1a7413975b53b0..50818075e566e1abf1f2f9e587951e5abed238fc
@@@ -54,7 -54,7 +54,7 @@@ TEST_PROGS += ip_local_port_range.s
  TEST_PROGS += rps_default_mask.sh
  TEST_PROGS += big_tcp.sh
  TEST_PROGS_EXTENDED := in_netns.sh setup_loopback.sh setup_veth.sh
 -TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh
 +TEST_PROGS_EXTENDED += toeplitz_client.sh toeplitz.sh lib.sh
  TEST_GEN_FILES =  socket nettest
  TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any
  TEST_GEN_FILES += tcp_mmap tcp_inq psock_snd txring_overwrite
@@@ -91,7 -91,7 +91,8 @@@ TEST_PROGS += test_bridge_neigh_suppres
  TEST_PROGS += test_vxlan_nolocalbypass.sh
  TEST_PROGS += test_bridge_backup_port.sh
  TEST_PROGS += fdb_flush.sh
 +TEST_PROGS += fq_band_pktlimit.sh
+ TEST_PROGS += vlan_hw_filter.sh
  
  TEST_FILES := settings
  
diff --combined tools/testing/selftests/net/mptcp/mptcp_join.sh
index 8362ea454af3877f3c50a161b8b7538ea886edf7,24a57b3ae2155d27de8229ab8e7dce242984b991..87590a43b50dfdd4fb5aa1ef79b276576339ce21
@@@ -511,6 -511,13 +511,6 @@@ get_failed_tests_ids(
        done | sort -n
  }
  
 -print_file_err()
 -{
 -      ls -l "$1" 1>&2
 -      echo -n "Trailing bytes are: "
 -      tail -c 27 "$1"
 -}
 -
  check_transfer()
  {
        local in=$1
                local sum=$((0${a} + 0${b}))
                if [ $check_invert -eq 0 ] || [ $sum -ne $((0xff)) ]; then
                        fail_test "$what does not match (in, out):"
 -                      print_file_err "$in"
 -                      print_file_err "$out"
 +                      mptcp_lib_print_file_err "$in"
 +                      mptcp_lib_print_file_err "$out"
  
                        return 1
                else
@@@ -580,9 -587,49 +580,9 @@@ link_failure(
        done
  }
  
 -# $1: IP address
 -is_v6()
 -{
 -      [ -z "${1##*:*}" ]
 -}
 -
 -# $1: ns, $2: port
 -wait_local_port_listen()
 -{
 -      local listener_ns="${1}"
 -      local port="${2}"
 -
 -      local port_hex
 -      port_hex="$(printf "%04X" "${port}")"
 -
 -      local i
 -      for i in $(seq 10); do
 -              ip netns exec "${listener_ns}" cat /proc/net/tcp* | \
 -                      awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) {rc=0; exit}} END {exit rc}" &&
 -                      break
 -              sleep 0.1
 -      done
 -}
 -
 -# $1: ns ; $2: counter
 -get_counter()
 -{
 -      local ns="${1}"
 -      local counter="${2}"
 -      local count
 -
 -      count=$(ip netns exec ${ns} nstat -asz "${counter}" | awk 'NR==1 {next} {print $2}')
 -      if [ -z "${count}" ]; then
 -              mptcp_lib_fail_if_expected_feature "${counter} counter"
 -              return 1
 -      fi
 -
 -      echo "${count}"
 -}
 -
  rm_addr_count()
  {
 -      get_counter "${1}" "MPTcpExtRmAddr"
 +      mptcp_lib_get_counter "${1}" "MPTcpExtRmAddr"
  }
  
  # $1: ns, $2: old rm_addr counter in $ns
@@@ -602,7 -649,7 +602,7 @@@ wait_rm_addr(
  
  rm_sf_count()
  {
 -      get_counter "${1}" "MPTcpExtRmSubflow"
 +      mptcp_lib_get_counter "${1}" "MPTcpExtRmSubflow"
  }
  
  # $1: ns, $2: old rm_sf counter in $ns
@@@ -625,20 -672,26 +625,20 @@@ wait_mpj(
        local ns="${1}"
        local cnt old_cnt
  
 -      old_cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
 +      old_cnt=$(mptcp_lib_get_counter ${ns} "MPTcpExtMPJoinAckRx")
  
        local i
        for i in $(seq 10); do
 -              cnt=$(get_counter ${ns} "MPTcpExtMPJoinAckRx")
 +              cnt=$(mptcp_lib_get_counter ${ns} "MPTcpExtMPJoinAckRx")
                [ "$cnt" = "${old_cnt}" ] || break
                sleep 0.1
        done
  }
  
 -kill_wait()
 -{
 -      kill $1 > /dev/null 2>&1
 -      wait $1 2>/dev/null
 -}
 -
  kill_events_pids()
  {
 -      kill_wait $evts_ns1_pid
 -      kill_wait $evts_ns2_pid
 +      mptcp_lib_kill_wait $evts_ns1_pid
 +      mptcp_lib_kill_wait $evts_ns2_pid
  }
  
  kill_tests_wait()
@@@ -848,7 -901,7 +848,7 @@@ pm_nl_set_endpoint(
                local id=10
                while [ $add_nr_ns1 -gt 0 ]; do
                        local addr
 -                      if is_v6 "${connect_addr}"; then
 +                      if mptcp_lib_is_v6 "${connect_addr}"; then
                                addr="dead:beef:$counter::1"
                        else
                                addr="10.0.$counter.1"
                local id=20
                while [ $add_nr_ns2 -gt 0 ]; do
                        local addr
 -                      if is_v6 "${connect_addr}"; then
 +                      if mptcp_lib_is_v6 "${connect_addr}"; then
                                addr="dead:beef:$counter::2"
                        else
                                addr="10.0.$counter.2"
                        pm_nl_flush_endpoint ${connector_ns}
                elif [ $rm_nr_ns2 -eq 9 ]; then
                        local addr
 -                      if is_v6 "${connect_addr}"; then
 +                      if mptcp_lib_is_v6 "${connect_addr}"; then
                                addr="dead:beef:1::2"
                        else
                                addr="10.0.1.2"
@@@ -1064,7 -1117,7 +1064,7 @@@ do_transfer(
        fi
        local spid=$!
  
 -      wait_local_port_listen "${listener_ns}" "${port}"
 +      mptcp_lib_wait_local_port_listen "${listener_ns}" "${port}"
  
        extra_cl_args="$extra_args $extra_cl_args"
        if [ "$test_linkfail" -eq 0 ];then
@@@ -1146,7 -1199,8 +1146,7 @@@ make_file(
        local who=$2
        local size=$3
  
 -      dd if=/dev/urandom of="$name" bs=1024 count=$size 2> /dev/null
 -      echo -e "\nMPTCP_TEST_FILE_END_MARKER" >> "$name"
 +      mptcp_lib_make_file $name 1024 $size
  
        print_info "Test file (size $size KB) for $who"
  }
@@@ -1230,7 -1284,7 +1230,7 @@@ chk_csum_nr(
        fi
  
        print_check "sum"
 -      count=$(get_counter ${ns1} "MPTcpExtDataCsumErr")
 +      count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtDataCsumErr")
        if [ "$count" != "$csum_ns1" ]; then
                extra_msg="$extra_msg ns1=$count"
        fi
                print_ok
        fi
        print_check "csum"
 -      count=$(get_counter ${ns2} "MPTcpExtDataCsumErr")
 +      count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtDataCsumErr")
        if [ "$count" != "$csum_ns2" ]; then
                extra_msg="$extra_msg ns2=$count"
        fi
@@@ -1287,7 -1341,7 +1287,7 @@@ chk_fail_nr(
        fi
  
        print_check "ftx"
 -      count=$(get_counter ${ns_tx} "MPTcpExtMPFailTx")
 +      count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFailTx")
        if [ "$count" != "$fail_tx" ]; then
                extra_msg="$extra_msg,tx=$count"
        fi
        fi
  
        print_check "failrx"
 -      count=$(get_counter ${ns_rx} "MPTcpExtMPFailRx")
 +      count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFailRx")
        if [ "$count" != "$fail_rx" ]; then
                extra_msg="$extra_msg,rx=$count"
        fi
@@@ -1334,7 -1388,7 +1334,7 @@@ chk_fclose_nr(
        fi
  
        print_check "ctx"
 -      count=$(get_counter ${ns_tx} "MPTcpExtMPFastcloseTx")
 +      count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPFastcloseTx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$fclose_tx" ]; then
        fi
  
        print_check "fclzrx"
 -      count=$(get_counter ${ns_rx} "MPTcpExtMPFastcloseRx")
 +      count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPFastcloseRx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$fclose_rx" ]; then
@@@ -1375,7 -1429,7 +1375,7 @@@ chk_rst_nr(
        fi
  
        print_check "rtx"
 -      count=$(get_counter ${ns_tx} "MPTcpExtMPRstTx")
 +      count=$(mptcp_lib_get_counter ${ns_tx} "MPTcpExtMPRstTx")
        if [ -z "$count" ]; then
                print_skip
        # accept more rst than expected except if we don't expect any
        fi
  
        print_check "rstrx"
 -      count=$(get_counter ${ns_rx} "MPTcpExtMPRstRx")
 +      count=$(mptcp_lib_get_counter ${ns_rx} "MPTcpExtMPRstRx")
        if [ -z "$count" ]; then
                print_skip
        # accept more rst than expected except if we don't expect any
@@@ -1408,7 -1462,7 +1408,7 @@@ chk_infi_nr(
        local count
  
        print_check "itx"
 -      count=$(get_counter ${ns2} "MPTcpExtInfiniteMapTx")
 +      count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtInfiniteMapTx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$infi_tx" ]; then
        fi
  
        print_check "infirx"
 -      count=$(get_counter ${ns1} "MPTcpExtInfiniteMapRx")
 +      count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtInfiniteMapRx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$infi_rx" ]; then
@@@ -1447,7 -1501,7 +1447,7 @@@ chk_join_nr(
        fi
  
        print_check "syn"
 -      count=$(get_counter ${ns1} "MPTcpExtMPJoinSynRx")
 +      count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinSynRx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$syn_nr" ]; then
  
        print_check "synack"
        with_cookie=$(ip netns exec $ns2 sysctl -n net.ipv4.tcp_syncookies)
 -      count=$(get_counter ${ns2} "MPTcpExtMPJoinSynAckRx")
 +      count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinSynAckRx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$syn_ack_nr" ]; then
        fi
  
        print_check "ack"
 -      count=$(get_counter ${ns1} "MPTcpExtMPJoinAckRx")
 +      count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinAckRx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$ack_nr" ]; then
@@@ -1508,8 -1562,8 +1508,8 @@@ chk_stale_nr(
  
        print_check "stale"
  
 -      stale_nr=$(get_counter ${ns} "MPTcpExtSubflowStale")
 -      recover_nr=$(get_counter ${ns} "MPTcpExtSubflowRecover")
 +      stale_nr=$(mptcp_lib_get_counter ${ns} "MPTcpExtSubflowStale")
 +      recover_nr=$(mptcp_lib_get_counter ${ns} "MPTcpExtSubflowRecover")
        if [ -z "$stale_nr" ] || [ -z "$recover_nr" ]; then
                print_skip
        elif [ $stale_nr -lt $stale_min ] ||
@@@ -1546,7 -1600,7 +1546,7 @@@ chk_add_nr(
        timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
  
        print_check "add"
 -      count=$(get_counter ${ns2} "MPTcpExtAddAddr")
 +      count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtAddAddr")
        if [ -z "$count" ]; then
                print_skip
        # if the test configured a short timeout, tolerate greater than expected
        fi
  
        print_check "echo"
 -      count=$(get_counter ${ns1} "MPTcpExtEchoAdd")
 +      count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtEchoAdd")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$echo_nr" ]; then
  
        if [ $port_nr -gt 0 ]; then
                print_check "pt"
 -              count=$(get_counter ${ns2} "MPTcpExtPortAdd")
 +              count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtPortAdd")
                if [ -z "$count" ]; then
                        print_skip
                elif [ "$count" != "$port_nr" ]; then
                fi
  
                print_check "syn"
 -              count=$(get_counter ${ns1} "MPTcpExtMPJoinPortSynRx")
 +              count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortSynRx")
                if [ -z "$count" ]; then
                        print_skip
                elif [ "$count" != "$syn_nr" ]; then
                fi
  
                print_check "synack"
 -              count=$(get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx")
 +              count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtMPJoinPortSynAckRx")
                if [ -z "$count" ]; then
                        print_skip
                elif [ "$count" != "$syn_ack_nr" ]; then
                fi
  
                print_check "ack"
 -              count=$(get_counter ${ns1} "MPTcpExtMPJoinPortAckRx")
 +              count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPJoinPortAckRx")
                if [ -z "$count" ]; then
                        print_skip
                elif [ "$count" != "$ack_nr" ]; then
                fi
  
                print_check "syn"
 -              count=$(get_counter ${ns1} "MPTcpExtMismatchPortSynRx")
 +              count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortSynRx")
                if [ -z "$count" ]; then
                        print_skip
                elif [ "$count" != "$mis_syn_nr" ]; then
                fi
  
                print_check "ack"
 -              count=$(get_counter ${ns1} "MPTcpExtMismatchPortAckRx")
 +              count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMismatchPortAckRx")
                if [ -z "$count" ]; then
                        print_skip
                elif [ "$count" != "$mis_ack_nr" ]; then
@@@ -1645,7 -1699,7 +1645,7 @@@ chk_add_tx_nr(
        timeout=$(ip netns exec $ns1 sysctl -n net.mptcp.add_addr_timeout)
  
        print_check "add TX"
 -      count=$(get_counter ${ns1} "MPTcpExtAddAddrTx")
 +      count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtAddAddrTx")
        if [ -z "$count" ]; then
                print_skip
        # if the test configured a short timeout, tolerate greater than expected
        fi
  
        print_check "echo TX"
 -      count=$(get_counter ${ns2} "MPTcpExtEchoAddTx")
 +      count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtEchoAddTx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$echo_tx_nr" ]; then
@@@ -1695,7 -1749,7 +1695,7 @@@ chk_rm_nr(
        fi
  
        print_check "rm"
 -      count=$(get_counter ${addr_ns} "MPTcpExtRmAddr")
 +      count=$(mptcp_lib_get_counter ${addr_ns} "MPTcpExtRmAddr")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$rm_addr_nr" ]; then
        fi
  
        print_check "rmsf"
 -      count=$(get_counter ${subflow_ns} "MPTcpExtRmSubflow")
 +      count=$(mptcp_lib_get_counter ${subflow_ns} "MPTcpExtRmSubflow")
        if [ -z "$count" ]; then
                print_skip
        elif [ -n "$simult" ]; then
                local cnt suffix
  
 -              cnt=$(get_counter ${addr_ns} "MPTcpExtRmSubflow")
 +              cnt=$(mptcp_lib_get_counter ${addr_ns} "MPTcpExtRmSubflow")
  
                # in case of simult flush, the subflow removal count on each side is
                # unreliable
@@@ -1740,7 -1794,7 +1740,7 @@@ chk_rm_tx_nr(
        local rm_addr_tx_nr=$1
  
        print_check "rm TX"
 -      count=$(get_counter ${ns2} "MPTcpExtRmAddrTx")
 +      count=$(mptcp_lib_get_counter ${ns2} "MPTcpExtRmAddrTx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$rm_addr_tx_nr" ]; then
@@@ -1757,7 -1811,7 +1757,7 @@@ chk_prio_nr(
        local count
  
        print_check "ptx"
 -      count=$(get_counter ${ns1} "MPTcpExtMPPrioTx")
 +      count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPPrioTx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$mp_prio_nr_tx" ]; then
        fi
  
        print_check "prx"
 -      count=$(get_counter ${ns1} "MPTcpExtMPPrioRx")
 +      count=$(mptcp_lib_get_counter ${ns1} "MPTcpExtMPPrioRx")
        if [ -z "$count" ]; then
                print_skip
        elif [ "$count" != "$mp_prio_nr_rx" ]; then
@@@ -1813,10 -1867,12 +1813,10 @@@ chk_mptcp_info(
        local cnt2
        local dump_stats
  
 -      print_check "mptcp_info ${info1:0:8}=$exp1:$exp2"
 +      print_check "mptcp_info ${info1:0:15}=$exp1:$exp2"
  
 -      cnt1=$(ss -N $ns1 -inmHM | grep "$info1:" |
 -             sed -n 's/.*\('"$info1"':\)\([[:digit:]]*\).*$/\2/p;q')
 -      cnt2=$(ss -N $ns2 -inmHM | grep "$info2:" |
 -             sed -n 's/.*\('"$info2"':\)\([[:digit:]]*\).*$/\2/p;q')
 +      cnt1=$(ss -N $ns1 -inmHM | mptcp_lib_get_info_value "$info1" "$info1")
 +      cnt2=$(ss -N $ns2 -inmHM | mptcp_lib_get_info_value "$info2" "$info2")
        # 'ss' only displays active connections and counters that are not 0.
        [ -z "$cnt1" ] && cnt1=0
        [ -z "$cnt2" ] && cnt2=0
        fi
  }
  
 +# $1: subflows in ns1 ; $2: subflows in ns2
 +# number of all subflows, including the initial subflow.
 +chk_subflows_total()
 +{
 +      local cnt1
 +      local cnt2
 +      local info="subflows_total"
 +      local dump_stats
 +
 +      # if subflows_total counter is supported, use it:
 +      if [ -n "$(ss -N $ns1 -inmHM | mptcp_lib_get_info_value $info $info)" ]; then
 +              chk_mptcp_info $info $1 $info $2
 +              return
 +      fi
 +
 +      print_check "$info $1:$2"
 +
 +      # if not, count the TCP connections that are in fact MPTCP subflows
 +      cnt1=$(ss -N $ns1 -ti state established state syn-sent state syn-recv |
 +             grep -c tcp-ulp-mptcp)
 +      cnt2=$(ss -N $ns2 -ti state established state syn-sent state syn-recv |
 +             grep -c tcp-ulp-mptcp)
 +
 +      if [ "$1" != "$cnt1" ] || [ "$2" != "$cnt2" ]; then
 +              fail_test "got subflows $cnt1:$cnt2 expected $1:$2"
 +              dump_stats=1
 +      else
 +              print_ok
 +      fi
 +
 +      if [ "$dump_stats" = 1 ]; then
 +              ss -N $ns1 -ti
 +              ss -N $ns2 -ti
 +      fi
 +}
 +
  chk_link_usage()
  {
        local ns=$1
@@@ -1901,7 -1921,7 +1901,7 @@@ wait_attempt_fail(
        while [ $time -lt $timeout_ms ]; do
                local cnt
  
 -              cnt=$(get_counter ${ns} "TcpAttemptFails")
 +              cnt=$(mptcp_lib_get_counter ${ns} "TcpAttemptFails")
  
                [ "$cnt" = 1 ] && return 1
                time=$((time + 100))
@@@ -2756,7 -2776,7 +2756,7 @@@ backup_tests(
        fi
  
        if reset "mpc backup" &&
-          continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+          continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
                pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
                speed=slow \
                        run_tests $ns1 $ns2 10.0.1.1
        fi
  
        if reset "mpc backup both sides" &&
-          continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+          continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
                pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
                pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
                speed=slow \
        fi
  
        if reset "mpc switch to backup" &&
-          continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+          continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
                pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
                sflags=backup speed=slow \
                        run_tests $ns1 $ns2 10.0.1.1
        fi
  
        if reset "mpc switch to backup both sides" &&
-          continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
+          continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
                pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
                pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
                sflags=backup speed=slow \
        fi
  }
  
 +SUB_ESTABLISHED=10 # MPTCP_EVENT_SUB_ESTABLISHED
  LISTENER_CREATED=15 #MPTCP_EVENT_LISTENER_CREATED
  LISTENER_CLOSED=16  #MPTCP_EVENT_LISTENER_CLOSED
  
@@@ -2829,13 -2848,13 +2829,13 @@@ verify_listener_events(
                return
        fi
  
 -      type=$(grep "type:$e_type," $evt | sed -n 's/.*\(type:\)\([[:digit:]]*\).*$/\2/p;q')
 -      family=$(grep "type:$e_type," $evt | sed -n 's/.*\(family:\)\([[:digit:]]*\).*$/\2/p;q')
 -      sport=$(grep "type:$e_type," $evt | sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
 +      type=$(mptcp_lib_evts_get_info type "$evt" "$e_type")
 +      family=$(mptcp_lib_evts_get_info family "$evt" "$e_type")
 +      sport=$(mptcp_lib_evts_get_info sport "$evt" "$e_type")
        if [ $family ] && [ $family = $AF_INET6 ]; then
 -              saddr=$(grep "type:$e_type," $evt | sed -n 's/.*\(saddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
 +              saddr=$(mptcp_lib_evts_get_info saddr6 "$evt" "$e_type")
        else
 -              saddr=$(grep "type:$e_type," $evt | sed -n 's/.*\(saddr4:\)\([0-9.]*\).*$/\2/p;q')
 +              saddr=$(mptcp_lib_evts_get_info saddr4 "$evt" "$e_type")
        fi
  
        if [ $type ] && [ $type = $e_type ] &&
@@@ -3230,7 -3249,8 +3230,7 @@@ fastclose_tests(
  pedit_action_pkts()
  {
        tc -n $ns2 -j -s action show action pedit index 100 | \
 -              grep "packets" | \
 -              sed 's/.*"packets":\([0-9]\+\),.*/\1/'
 +              mptcp_lib_get_info_value \"packets\" packets
  }
  
  fail_tests()
        fi
  }
  
 +# $1: ns ; $2: addr ; $3: id
  userspace_pm_add_addr()
  {
 -      local addr=$1
 -      local id=$2
 +      local evts=$evts_ns1
        local tk
  
 -      tk=$(grep "type:1," "$evts_ns1" |
 -           sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
 -      ip netns exec $ns1 ./pm_nl_ctl ann $addr token $tk id $id
 +      [ "$1" == "$ns2" ] && evts=$evts_ns2
 +      tk=$(mptcp_lib_evts_get_info token "$evts")
 +
 +      ip netns exec $1 ./pm_nl_ctl ann $2 token $tk id $3
        sleep 1
  }
  
 -userspace_pm_rm_sf_addr_ns1()
 +# $1: ns ; $2: id
 +userspace_pm_rm_addr()
  {
 -      local addr=$1
 -      local id=$2
 -      local tk sp da dp
 -      local cnt_addr cnt_sf
 -
 -      tk=$(grep "type:1," "$evts_ns1" |
 -           sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q')
 -      sp=$(grep "type:10" "$evts_ns1" |
 -           sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
 -      da=$(grep "type:10" "$evts_ns1" |
 -           sed -n 's/.*\(daddr6:\)\([0-9a-f:.]*\).*$/\2/p;q')
 -      dp=$(grep "type:10" "$evts_ns1" |
 -           sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q')
 -      cnt_addr=$(rm_addr_count ${ns1})
 -      cnt_sf=$(rm_sf_count ${ns1})
 -      ip netns exec $ns1 ./pm_nl_ctl rem token $tk id $id
 -      ip netns exec $ns1 ./pm_nl_ctl dsf lip "::ffff:$addr" \
 -                              lport $sp rip $da rport $dp token $tk
 -      wait_rm_addr $ns1 "${cnt_addr}"
 -      wait_rm_sf $ns1 "${cnt_sf}"
 +      local evts=$evts_ns1
 +      local tk
 +      local cnt
 +
 +      [ "$1" == "$ns2" ] && evts=$evts_ns2
 +      tk=$(mptcp_lib_evts_get_info token "$evts")
 +
 +      cnt=$(rm_addr_count ${1})
 +      ip netns exec $1 ./pm_nl_ctl rem token $tk id $2
 +      wait_rm_addr $1 "${cnt}"
  }
  
 +# $1: ns ; $2: addr ; $3: id
  userspace_pm_add_sf()
  {
 -      local addr=$1
 -      local id=$2
 +      local evts=$evts_ns1
        local tk da dp
  
 -      tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
 -      da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
 -      dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
 -      ip netns exec $ns2 ./pm_nl_ctl csf lip $addr lid $id \
 +      [ "$1" == "$ns2" ] && evts=$evts_ns2
 +      tk=$(mptcp_lib_evts_get_info token "$evts")
 +      da=$(mptcp_lib_evts_get_info daddr4 "$evts")
 +      dp=$(mptcp_lib_evts_get_info dport "$evts")
 +
 +      ip netns exec $1 ./pm_nl_ctl csf lip $2 lid $3 \
                                rip $da rport $dp token $tk
        sleep 1
  }
  
 -userspace_pm_rm_sf_addr_ns2()
 +# $1: ns ; $2: addr $3: event type
 +userspace_pm_rm_sf()
  {
 -      local addr=$1
 -      local id=$2
 +      local evts=$evts_ns1
 +      local t=${3:-1}
 +      local ip=4
        local tk da dp sp
 -      local cnt_addr cnt_sf
 -
 -      tk=$(sed -n 's/.*\(token:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
 -      da=$(sed -n 's/.*\(daddr4:\)\([0-9.]*\).*$/\2/p;q' "$evts_ns2")
 -      dp=$(sed -n 's/.*\(dport:\)\([[:digit:]]*\).*$/\2/p;q' "$evts_ns2")
 -      sp=$(grep "type:10" "$evts_ns2" |
 -           sed -n 's/.*\(sport:\)\([[:digit:]]*\).*$/\2/p;q')
 -      cnt_addr=$(rm_addr_count ${ns2})
 -      cnt_sf=$(rm_sf_count ${ns2})
 -      ip netns exec $ns2 ./pm_nl_ctl rem token $tk id $id
 -      ip netns exec $ns2 ./pm_nl_ctl dsf lip $addr lport $sp \
 +      local cnt
 +
 +      [ "$1" == "$ns2" ] && evts=$evts_ns2
 +      if mptcp_lib_is_v6 $2; then ip=6; fi
 +      tk=$(mptcp_lib_evts_get_info token "$evts")
 +      da=$(mptcp_lib_evts_get_info "daddr$ip" "$evts" $t)
 +      dp=$(mptcp_lib_evts_get_info dport "$evts" $t)
 +      sp=$(mptcp_lib_evts_get_info sport "$evts" $t)
 +
 +      cnt=$(rm_sf_count ${1})
 +      ip netns exec $1 ./pm_nl_ctl dsf lip $2 lport $sp \
                                rip $da rport $dp token $tk
 -      wait_rm_addr $ns2 "${cnt_addr}"
 -      wait_rm_sf $ns2 "${cnt_sf}"
 +      wait_rm_sf $1 "${cnt}"
  }
  
  userspace_tests()
                        run_tests $ns1 $ns2 10.0.1.1 &
                local tests_pid=$!
                wait_mpj $ns1
 -              userspace_pm_add_addr 10.0.2.1 10
 +              userspace_pm_add_addr $ns1 10.0.2.1 10
                chk_join_nr 1 1 1
                chk_add_nr 1 1
                chk_mptcp_info subflows 1 subflows 1
 +              chk_subflows_total 2 2
                chk_mptcp_info add_addr_signal 1 add_addr_accepted 1
 -              userspace_pm_rm_sf_addr_ns1 10.0.2.1 10
 +              userspace_pm_rm_addr $ns1 10
 +              userspace_pm_rm_sf $ns1 "::ffff:10.0.2.1" $SUB_ESTABLISHED
                chk_rm_nr 1 1 invert
                chk_mptcp_info subflows 0 subflows 0
 +              chk_subflows_total 1 1
                kill_events_pids
                wait $tests_pid
        fi
                        run_tests $ns1 $ns2 10.0.1.1 &
                local tests_pid=$!
                wait_mpj $ns2
 -              userspace_pm_add_sf 10.0.3.2 20
 +              userspace_pm_add_sf $ns2 10.0.3.2 20
                chk_join_nr 1 1 1
                chk_mptcp_info subflows 1 subflows 1
 -              userspace_pm_rm_sf_addr_ns2 10.0.3.2 20
 +              chk_subflows_total 2 2
 +              userspace_pm_rm_addr $ns2 20
 +              userspace_pm_rm_sf $ns2 10.0.3.2 $SUB_ESTABLISHED
                chk_rm_nr 1 1
                chk_mptcp_info subflows 0 subflows 0
 +              chk_subflows_total 1 1
 +              kill_events_pids
 +              wait $tests_pid
 +      fi
 +
 +      # userspace pm create id 0 subflow
 +      if reset_with_events "userspace pm create id 0 subflow" &&
 +         continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
 +              set_userspace_pm $ns2
 +              pm_nl_set_limits $ns1 0 1
 +              speed=5 \
 +                      run_tests $ns1 $ns2 10.0.1.1 &
 +              local tests_pid=$!
 +              wait_mpj $ns2
 +              chk_mptcp_info subflows 0 subflows 0
 +              chk_subflows_total 1 1
 +              userspace_pm_add_sf $ns2 10.0.3.2 0
 +              chk_join_nr 1 1 1
 +              chk_mptcp_info subflows 1 subflows 1
 +              chk_subflows_total 2 2
 +              kill_events_pids
 +              wait $tests_pid
 +      fi
 +
 +      # userspace pm remove initial subflow
 +      if reset_with_events "userspace pm remove initial subflow" &&
 +         continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
 +              set_userspace_pm $ns2
 +              pm_nl_set_limits $ns1 0 1
 +              speed=5 \
 +                      run_tests $ns1 $ns2 10.0.1.1 &
 +              local tests_pid=$!
 +              wait_mpj $ns2
 +              userspace_pm_add_sf $ns2 10.0.3.2 20
 +              chk_join_nr 1 1 1
 +              chk_mptcp_info subflows 1 subflows 1
 +              chk_subflows_total 2 2
 +              userspace_pm_rm_sf $ns2 10.0.1.2
 +              # we don't look at the counter linked to the RM_ADDR but
 +              # to the one linked to the subflows that have been removed
 +              chk_rm_nr 0 1
 +              chk_rst_nr 0 0 invert
 +              chk_mptcp_info subflows 1 subflows 1
 +              chk_subflows_total 1 1
 +              kill_events_pids
 +              wait $tests_pid
 +      fi
 +
 +      # userspace pm send RM_ADDR for ID 0
 +      if reset_with_events "userspace pm send RM_ADDR for ID 0" &&
 +         continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then
 +              set_userspace_pm $ns1
 +              pm_nl_set_limits $ns2 1 1
 +              speed=5 \
 +                      run_tests $ns1 $ns2 10.0.1.1 &
 +              local tests_pid=$!
 +              wait_mpj $ns1
 +              userspace_pm_add_addr $ns1 10.0.2.1 10
 +              chk_join_nr 1 1 1
 +              chk_add_nr 1 1
 +              chk_mptcp_info subflows 1 subflows 1
 +              chk_subflows_total 2 2
 +              chk_mptcp_info add_addr_signal 1 add_addr_accepted 1
 +              userspace_pm_rm_addr $ns1 0
 +              # we don't look at the counter linked to the subflows that
 +              # have been removed but to the one linked to the RM_ADDR
 +              chk_rm_nr 1 0 invert
 +              chk_rst_nr 0 0 invert
 +              chk_mptcp_info subflows 1 subflows 1
 +              chk_subflows_total 1 1
                kill_events_pids
                wait $tests_pid
        fi