Git Repo - linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
author Jakub Kicinski <[email protected]>
Sat, 28 Nov 2020 02:25:27 +0000 (18:25 -0800)
committer Jakub Kicinski <[email protected]>
Sat, 28 Nov 2020 02:25:27 +0000 (18:25 -0800)
Trivial conflict in CAN, keep the net-next + the byteswap wrapper.

Conflicts:
drivers/net/can/usb/gs_usb.c

Signed-off-by: Jakub Kicinski <[email protected]>
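
The "byteswap wrapper" kept from net-next refers to the gs_usb change below:
the structs that cross the USB wire are typed with __le32 members and every
access goes through cpu_to_le32()/le32_to_cpu(), so data is exchanged in
little endian regardless of host byte order. A minimal sketch of that
pattern, with hypothetical struct and function names:

    /* Illustrative only: typing wire-format fields as __le32 lets sparse
     * flag any access that would leak host byte order onto the wire.
     */
    #include <linux/compiler.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct demo_wire_msg {                    /* hypothetical wire layout */
            __le32 mode;
            __le32 flags;
    } __packed;

    static void demo_fill(struct demo_wire_msg *msg, u32 mode, u32 flags)
    {
            msg->mode  = cpu_to_le32(mode);   /* host -> little endian */
            msg->flags = cpu_to_le32(flags);
    }

    static u32 demo_read_mode(const struct demo_wire_msg *msg)
    {
            return le32_to_cpu(msg->mode);    /* little endian -> host */
    }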
25 files changed:
MAINTAINERS
drivers/net/bonding/bond_main.c
drivers/net/can/m_can/m_can.c
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
drivers/net/can/usb/gs_usb.c
drivers/net/ethernet/freescale/enetc/enetc_qos.c
drivers/net/ethernet/ibm/ibmvnic.c
drivers/net/ethernet/ibm/ibmvnic.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
drivers/net/tun.c
drivers/nfc/s3fwrn5/i2c.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
include/linux/netdevice.h
include/net/tls.h
net/can/af_can.c
net/core/devlink.c
net/core/skbuff.c
net/ipv4/tcp_ipv4.c
net/mptcp/subflow.c
net/packet/af_packet.c
net/tls/tls_device.c
net/tls/tls_sw.c

diff --combined MAINTAINERS
index 8545e72c495c9755b51c924ce87e532a8398f006,48cf2fec73d3e607e9b64bb92ecc258fca34e60a..a7bdebf955bbe6f70cf15c2094aea2f3a96ffb62
@@@ -1995,7 -1995,6 +1995,6 @@@ N:      lpc18x
  
  ARM/LPC32XX SOC SUPPORT
  M:    Vladimir Zapolskiy <[email protected]>
- M:    Sylvain Lemieux <[email protected]>
  L:    [email protected] (moderated for non-subscribers)
  S:    Maintained
  T:    git git://github.com/vzapolskiy/linux-lpc32xx.git
@@@ -3528,11 -3527,12 +3527,12 @@@ BROADCOM BRCM80211 IEEE802.11n WIRELES
  M:    Arend van Spriel <[email protected]>
  M:    Franky Lin <[email protected]>
  M:    Hante Meuleman <[email protected]>
- M:    Chi-Hsien Lin <[email protected]>
- M:    Wright Feng <[email protected]>
+ M:    Chi-hsien Lin <[email protected]>
+ M:    Wright Feng <[email protected]>
+ M:    Chung-hsien Hsu <[email protected]>
  L:    [email protected]
  L:    [email protected]
- L:    brcm80211-dev-list@cypress.com
+ L:    SHA-cyfmac-dev-list@infineon.com
  S:    Supported
  F:    drivers/net/wireless/broadcom/brcm80211/
  
@@@ -6895,6 -6895,12 +6895,6 @@@ S:     Maintaine
  W:    http://floatingpoint.sourceforge.net/emulator/index.html
  F:    arch/x86/math-emu/
  
 -FRAME RELAY DLCI/FRAD (Sangoma drivers too)
 -L:    [email protected]
 -S:    Orphan
 -F:    drivers/net/wan/dlci.c
 -F:    drivers/net/wan/sdla.c
 -
  FRAMEBUFFER LAYER
  M:    Bartlomiej Zolnierkiewicz <[email protected]>
  L:    [email protected]
@@@ -7882,15 -7888,6 +7882,15 @@@ F:    include/linux/hippidevice.
  F:    include/uapi/linux/if_hippi.h
  F:    net/802/hippi.c
  
 +HIRSCHMANN HELLCREEK ETHERNET SWITCH DRIVER
 +M:    Kurt Kanzenbach <[email protected]>
 +L:    [email protected]
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml
 +F:    drivers/net/dsa/hirschmann/*
 +F:    include/linux/platform_data/hirschmann-hellcreek.h
 +F:    net/dsa/tag_hellcreek.c
 +
  HISILICON DMA DRIVER
  M:    Zhou Wang <[email protected]>
  L:    [email protected]
@@@ -9080,6 -9077,16 +9080,6 @@@ W:     https://wireless.wiki.kernel.org/en/
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi.git
  F:    drivers/net/wireless/intel/iwlwifi/
  
 -INTEL WIRELESS WIMAX CONNECTION 2400
 -M:    Inaky Perez-Gonzalez <[email protected]>
 -M:    [email protected]
 -L:    [email protected] (subscribers-only)
 -S:    Supported
 -W:    http://linuxwimax.org
 -F:    Documentation/admin-guide/wimax/i2400m.rst
 -F:    drivers/net/wimax/i2400m/
 -F:    include/uapi/linux/wimax/i2400m.h
 -
  INTEL WMI SLIM BOOTLOADER (SBL) FIRMWARE UPDATE DRIVER
  M:    Jithu Joseph <[email protected]>
  R:    Maurice Ma <[email protected]>
@@@ -9155,6 -9162,7 +9155,7 @@@ F:      include/linux/iomap.
  
  IOMMU DRIVERS
  M:    Joerg Roedel <[email protected]>
+ M:    Will Deacon <[email protected]>
  L:    [email protected]
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
@@@ -9638,6 -9646,7 +9639,7 @@@ F:      Documentation/virt/kvm/s390
  F:    arch/s390/include/asm/gmap.h
  F:    arch/s390/include/asm/kvm*
  F:    arch/s390/include/uapi/asm/kvm*
+ F:    arch/s390/kernel/uv.c
  F:    arch/s390/kvm/
  F:    arch/s390/mm/gmap.c
  F:    tools/testing/selftests/kvm/*/s390x/
@@@ -10453,7 -10462,6 +10455,7 @@@ M:   Srujana Challa <[email protected]
  L:    [email protected]
  S:    Maintained
  F:    drivers/crypto/marvell/
 +F:    include/linux/soc/marvell/octeontx2/
  
  MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
  M:    Mirko Lindner <[email protected]>
@@@ -10526,7 -10534,6 +10528,7 @@@ M:   hariprasad <[email protected]
  L:    [email protected]
  S:    Supported
  F:    drivers/net/ethernet/marvell/octeontx2/nic/
 +F:    include/linux/soc/marvell/octeontx2/
  
  MARVELL OCTEONTX2 RVU ADMIN FUNCTION DRIVER
  M:    Sunil Goutham <[email protected]>
@@@ -11487,7 -11494,7 +11489,7 @@@ M:   Woojung Huh <[email protected]
  M:    Microchip Linux Driver Support <[email protected]>
  L:    [email protected]
  S:    Maintained
 -F:    Documentation/devicetree/bindings/net/dsa/ksz.txt
 +F:    Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
  F:    drivers/net/dsa/microchip/*
  F:    include/linux/platform_data/microchip-ksz.h
  F:    net/dsa/tag_ksz.c
  S:    Maintained
  W:    https://github.com/multipath-tcp/mptcp_net-next/wiki
  B:    https://github.com/multipath-tcp/mptcp_net-next/issues
 +F:    Documentation/networking/mptcp-sysctl.rst
  F:    include/net/mptcp.h
  F:    include/uapi/linux/mptcp.h
  F:    net/mptcp/
@@@ -13156,7 -13162,9 +13158,9 @@@ M:   Jesper Dangaard Brouer <hawk@kernel.
  M:    Ilias Apalodimas <[email protected]>
  L:    [email protected]
  S:    Supported
+ F:    Documentation/networking/page_pool.rst
  F:    include/net/page_pool.h
+ F:    include/trace/events/page_pool.h
  F:    net/core/page_pool.c
  
  PANASONIC LAPTOP ACPI EXTRAS DRIVER
@@@ -14798,7 -14806,7 +14802,7 @@@ T:   git git://git.kernel.org/pub/scm/lin
  F:    drivers/net/wireless/realtek/rtlwifi/
  
  REALTEK WIRELESS DRIVER (rtw88)
- M:    Yan-Hsuan Chuang <yhchuang@realtek.com>
+ M:    Yan-Hsuan Chuang <tony0620emma@gmail.com>
  L:    [email protected]
  S:    Maintained
  F:    drivers/net/wireless/realtek/rtw88/
@@@ -15771,9 -15779,8 +15775,8 @@@ F:   drivers/slimbus
  F:    include/linux/slimbus.h
  
  SFC NETWORK DRIVER
- M:    Solarflare linux maintainers <[email protected]>
- M:    Edward Cree <[email protected]>
- M:    Martin Habets <[email protected]>
+ M:    Edward Cree <[email protected]>
+ M:    Martin Habets <[email protected]>
  L:    [email protected]
  S:    Supported
  F:    drivers/net/ethernet/sfc/
@@@ -18871,6 -18878,18 +18874,6 @@@ S:  Supporte
  W:    https://wireless.wiki.kernel.org/en/users/Drivers/wil6210
  F:    drivers/net/wireless/ath/wil6210/
  
 -WIMAX STACK
 -M:    Inaky Perez-Gonzalez <[email protected]>
 -M:    [email protected]
 -L:    [email protected] (subscribers-only)
 -S:    Supported
 -W:    http://linuxwimax.org
 -F:    Documentation/admin-guide/wimax/wimax.rst
 -F:    include/linux/wimax/debug.h
 -F:    include/net/wimax.h
 -F:    include/uapi/linux/wimax.h
 -F:    net/wimax/
 -
  WINBOND CIR DRIVER
  M:    David Härdeman <[email protected]>
  S:    Maintained
index 71c9677d135f745bbc21c247a623ad01d294b806,47afc5938c26bcab92726af762efd95bdc066db7..e0880a3840d7a15695405105360bf2275788dbe3
@@@ -1228,14 -1228,14 +1228,14 @@@ static netdev_features_t bond_fix_featu
  }
  
  #define BOND_VLAN_FEATURES    (NETIF_F_HW_CSUM | NETIF_F_SG | \
 -                               NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
 +                               NETIF_F_FRAGLIST | NETIF_F_GSO_SOFTWARE | \
                                 NETIF_F_HIGHDMA | NETIF_F_LRO)
  
  #define BOND_ENC_FEATURES     (NETIF_F_HW_CSUM | NETIF_F_SG | \
 -                               NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
 +                               NETIF_F_RXCSUM | NETIF_F_GSO_SOFTWARE)
  
  #define BOND_MPLS_FEATURES    (NETIF_F_HW_CSUM | NETIF_F_SG | \
 -                               NETIF_F_ALL_TSO)
 +                               NETIF_F_GSO_SOFTWARE)
  
  
  static void bond_compute_features(struct bonding *bond)
@@@ -1291,7 -1291,8 +1291,7 @@@ done
        bond_dev->vlan_features = vlan_features;
        bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
                                    NETIF_F_HW_VLAN_CTAG_TX |
 -                                  NETIF_F_HW_VLAN_STAG_TX |
 -                                  NETIF_F_GSO_UDP_L4;
 +                                  NETIF_F_HW_VLAN_STAG_TX;
  #ifdef CONFIG_XFRM_OFFLOAD
        bond_dev->hw_enc_features |= xfrm_features;
  #endif /* CONFIG_XFRM_OFFLOAD */
@@@ -1459,7 -1460,39 +1459,39 @@@ static void bond_upper_dev_unlink(struc
        slave->dev->flags &= ~IFF_SLAVE;
  }
  
- static struct slave *bond_alloc_slave(struct bonding *bond)
+ static void slave_kobj_release(struct kobject *kobj)
+ {
+       struct slave *slave = to_slave(kobj);
+       struct bonding *bond = bond_get_bond_by_slave(slave);
+ 
+       cancel_delayed_work_sync(&slave->notify_work);
+       if (BOND_MODE(bond) == BOND_MODE_8023AD)
+               kfree(SLAVE_AD_INFO(slave));
+ 
+       kfree(slave);
+ }
+ 
+ static struct kobj_type slave_ktype = {
+       .release = slave_kobj_release,
+ #ifdef CONFIG_SYSFS
+       .sysfs_ops = &slave_sysfs_ops,
+ #endif
+ };
+ 
+ static int bond_kobj_init(struct slave *slave)
+ {
+       int err;
+ 
+       err = kobject_init_and_add(&slave->kobj, &slave_ktype,
+                                  &(slave->dev->dev.kobj), "bonding_slave");
+       if (err)
+               kobject_put(&slave->kobj);
+ 
+       return err;
+ }
+ static struct slave *bond_alloc_slave(struct bonding *bond,
+                                     struct net_device *slave_dev)
  {
        struct slave *slave = NULL;

        slave = kzalloc(sizeof(*slave), GFP_KERNEL);
        if (!slave)
                return NULL;
  
+       slave->bond = bond;
+       slave->dev = slave_dev;
+ 
+       if (bond_kobj_init(slave))
+               return NULL;
+ 
        if (BOND_MODE(bond) == BOND_MODE_8023AD) {
                SLAVE_AD_INFO(slave) = kzalloc(sizeof(struct ad_slave_info),
                                               GFP_KERNEL);
                if (!SLAVE_AD_INFO(slave)) {
-                       kfree(slave);
+                       kobject_put(&slave->kobj);
                        return NULL;
                }
        }
        return slave;
  }
  
- static void bond_free_slave(struct slave *slave)
- {
-       struct bonding *bond = bond_get_bond_by_slave(slave);
- 
-       cancel_delayed_work_sync(&slave->notify_work);
-       if (BOND_MODE(bond) == BOND_MODE_8023AD)
-               kfree(SLAVE_AD_INFO(slave));
- 
-       kfree(slave);
- }
- 
  static void bond_fill_ifbond(struct bonding *bond, struct ifbond *info)
  {
        info->bond_mode = BOND_MODE(bond);
@@@ -1677,14 -1705,12 +1704,12 @@@ int bond_enslave(struct net_device *bon
                        goto err_undo_flags;
        }
  
-       new_slave = bond_alloc_slave(bond);
+       new_slave = bond_alloc_slave(bond, slave_dev);
        if (!new_slave) {
                res = -ENOMEM;
                goto err_undo_flags;
        }
  
-       new_slave->bond = bond;
-       new_slave->dev = slave_dev;
        /* Set the new_slave's queue_id to be zero.  Queue ID mapping
         * is set via sysfs or module option if desired.
         */
@@@ -2006,7 -2032,7 +2031,7 @@@ err_restore_mtu
        dev_set_mtu(slave_dev, new_slave->original_mtu);
  
  err_free:
-       bond_free_slave(new_slave);
+       kobject_put(&new_slave->kobj);
  
  err_undo_flags:
        /* Enslave of first slave has failed and we need to fix master's mac */
@@@ -2186,7 -2212,7 +2211,7 @@@ static int __bond_release_one(struct ne
        if (!netif_is_bond_master(slave_dev))
                slave_dev->priv_flags &= ~IFF_BONDING;
  
-       bond_free_slave(slave);
+       kobject_put(&slave->kobj);
  
        return 0;
  }
@@@ -4720,7 -4746,7 +4745,7 @@@ void bond_setup(struct net_device *bond
                                NETIF_F_HW_VLAN_CTAG_RX |
                                NETIF_F_HW_VLAN_CTAG_FILTER;
  
 -      bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL | NETIF_F_GSO_UDP_L4;
 +      bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
  #ifdef CONFIG_XFRM_OFFLOAD
        bond_dev->hw_features |= BOND_XFRM_FEATURES;
  #endif /* CONFIG_XFRM_OFFLOAD */
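
The bond_main.c hunks above (from net) replace bond_free_slave() with
kobject_put(): the slave is now freed from the embedded kobject's
->release() callback, so a sysfs reference can safely outlive
__bond_release_one(). A minimal sketch of that lifetime pattern, with
hypothetical names:

    /* Illustrative only: embed a struct kobject, free the object in the
     * ktype's ->release(), and drop references with kobject_put() rather
     * than calling kfree() directly.
     */
    #include <linux/kobject.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct kobject kobj;
            int payload;
    };

    #define to_demo_obj(k) container_of(k, struct demo_obj, kobj)

    static void demo_release(struct kobject *kobj)
    {
            kfree(to_demo_obj(kobj));         /* runs when refcount hits 0 */
    }

    static struct kobj_type demo_ktype = {
            .release = demo_release,
    };

    static struct demo_obj *demo_alloc(struct kobject *parent)
    {
            struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return NULL;
            if (kobject_init_and_add(&obj->kobj, &demo_ktype, parent, "demo")) {
                    kobject_put(&obj->kobj);  /* ->release() frees obj */
                    return NULL;
            }
            return obj;
    }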
index a345e22f545ef1eb9a4639bf9dcfccacb6d292bd,61a93b19203799a71c07cf73a78309f12118ef4b..553ff39199aa0b93467beee3ff1c6545260be942
@@@ -457,9 -457,9 +457,9 @@@ static void m_can_read_fifo(struct net_
        }
  
        if (dlc & RX_BUF_FDF)
 -              cf->len = can_dlc2len((dlc >> 16) & 0x0F);
 +              cf->len = can_fd_dlc2len((dlc >> 16) & 0x0F);
        else
 -              cf->len = get_can_dlc((dlc >> 16) & 0x0F);
 +              cf->len = can_cc_dlc2len((dlc >> 16) & 0x0F);
  
        id = m_can_fifo_read(cdev, fgi, M_CAN_FIFO_ID);
        if (id & RX_BUF_XTD)
@@@ -596,7 -596,7 +596,7 @@@ static int m_can_handle_lec_err(struct 
        }
  
        stats->rx_packets++;
 -      stats->rx_bytes += cf->can_dlc;
 +      stats->rx_bytes += cf->len;
        netif_receive_skb(skb);
  
        return 1;
@@@ -723,7 -723,7 +723,7 @@@ static int m_can_handle_state_change(st
        }
  
        stats->rx_packets++;
 -      stats->rx_bytes += cf->can_dlc;
 +      stats->rx_bytes += cf->len;
        netif_receive_skb(skb);
  
        return 1;
@@@ -1033,7 -1033,7 +1033,7 @@@ static const struct can_bittiming_cons
        .name = KBUILD_MODNAME,
        .tseg1_min = 2,         /* Time segment 1 = prop_seg + phase_seg1 */
        .tseg1_max = 256,
-       .tseg2_min = 1,         /* Time segment 2 = phase_seg2 */
+       .tseg2_min = 2,         /* Time segment 2 = phase_seg2 */
        .tseg2_max = 128,
        .sjw_max = 128,
        .brp_min = 1,
@@@ -1385,6 -1385,8 +1385,8 @@@ static int m_can_dev_setup(struct m_can
                                                &m_can_data_bittiming_const_31X;
                break;
        case 32:
+       case 33:
+               /* Support both MCAN version v3.2.x and v3.3.0 */
                m_can_dev->can.bittiming_const = m_can_dev->bit_timing ?
                        m_can_dev->bit_timing : &m_can_bittiming_const_31X;
  
@@@ -1489,7 -1491,7 +1491,7 @@@ static netdev_tx_t m_can_tx_handler(str
                /* message ram configuration */
                m_can_fifo_write(cdev, 0, M_CAN_FIFO_ID, id);
                m_can_fifo_write(cdev, 0, M_CAN_FIFO_DLC,
 -                               can_len2dlc(cf->len) << 16);
 +                               can_fd_len2dlc(cf->len) << 16);
  
                for (i = 0; i < cf->len; i += 4)
                        m_can_fifo_write(cdev, 0,
                m_can_fifo_write(cdev, putidx, M_CAN_FIFO_DLC,
                                 ((putidx << TX_BUF_MM_SHIFT) &
                                  TX_BUF_MM_MASK) |
 -                               (can_len2dlc(cf->len) << 16) |
 +                               (can_fd_len2dlc(cf->len) << 16) |
                                 fdflags | TX_BUF_EFC);
  
                for (i = 0; i < cf->len; i += 4)
@@@ -1653,7 -1655,7 +1655,7 @@@ static int m_can_open(struct net_devic
                INIT_WORK(&cdev->tx_work, m_can_tx_work_queue);
  
                err = request_threaded_irq(dev->irq, NULL, m_can_isr,
-                                          IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+                                          IRQF_ONESHOT,
                                           dev->name, dev);
        } else {
                err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
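
The m_can hunks above (and the mcp251xfd hunks below) track a rename of the
CAN DLC helpers in net-next: can_dlc2len()/can_len2dlc() became
can_fd_dlc2len()/can_fd_len2dlc(), and get_can_dlc() became
can_cc_dlc2len(), separating the CAN FD code/length table from Classical
CAN semantics. A sketch of the mapping the helpers implement (illustrative
re-implementation, not the CAN core's copy):

    /* CAN FD: DLC 0..8 map 1:1 to a byte count; DLC 9..15 select the
     * longer FD payload sizes.
     */
    static const unsigned char demo_fd_dlc2len[16] = {
            0, 1, 2, 3, 4, 5, 6, 7, 8,
            12, 16, 20, 24, 32, 48, 64,
    };

    static unsigned char demo_can_fd_dlc2len(unsigned char dlc)
    {
            return demo_fd_dlc2len[dlc & 0x0f];
    }

    /* Classical CAN: DLC values above 8 still mean 8 data bytes. */
    static unsigned char demo_can_cc_dlc2len(unsigned char dlc)
    {
            return dlc > 8 ? 8 : dlc;
    }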
index 3297eb7ecc9c2b91ddcd2107a3bf00dba0c4f0aa,8a39be076e143e21634b976e9494d986eec3c054..d0f2f5c739070fac9ce94dfe9e88cce684af746c
@@@ -644,7 -644,10 +644,7 @@@ static int mcp251xfd_chip_softreset(con
                return 0;
        }
  
 -      if (err)
 -              return err;
 -
 -      return -ETIMEDOUT;
 +      return err;
  }
  
  static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv)
@@@ -1402,12 -1405,12 +1402,12 @@@ mcp251xfd_hw_rx_obj_to_skb(const struc
                        cfd->flags |= CANFD_BRS;
  
                dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC, hw_rx_obj->flags);
 -              cfd->len = can_dlc2len(get_canfd_dlc(dlc));
 +              cfd->len = can_fd_dlc2len(dlc);
        } else {
                if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
                        cfd->can_id |= CAN_RTR_FLAG;
  
 -              cfd->len = get_can_dlc(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
 +              cfd->len = can_cc_dlc2len(FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC,
                                                 hw_rx_obj->flags));
        }
  
@@@ -2241,7 -2244,7 +2241,7 @@@ mcp251xfd_tx_obj_from_skb(const struct 
         * harm, only the lower 7 bits will be transferred into the
         * TEF object.
         */
 -      dlc = can_len2dlc(cfd->len);
 +      dlc = can_fd_len2dlc(cfd->len);
        flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq) |
                FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC, dlc);
  
  
        /* Clear data at end of CAN frame */
        offset = round_down(cfd->len, sizeof(u32));
 -      len = round_up(can_dlc2len(dlc), sizeof(u32)) - offset;
 +      len = round_up(can_fd_dlc2len(dlc), sizeof(u32)) - offset;
        if (MCP251XFD_SANITIZE_CAN && len)
                memset(hw_tx_obj->data + offset, 0x0, len);
        memcpy(hw_tx_obj->data, cfd->data, cfd->len);
        /* Number of bytes to be written into the RAM of the controller */
        len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
        if (MCP251XFD_SANITIZE_CAN)
 -              len += round_up(can_dlc2len(dlc), sizeof(u32));
 +              len += round_up(can_fd_dlc2len(dlc), sizeof(u32));
        else
                len += round_up(cfd->len, sizeof(u32));
  
@@@ -2735,6 -2738,10 +2735,10 @@@ static int mcp251xfd_probe(struct spi_d
        u32 freq;
        int err;
  
+       if (!spi->irq)
+               return dev_err_probe(&spi->dev, -ENXIO,
+                                    "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n");
+ 
        rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int",
                                         GPIOD_IN);
        if (PTR_ERR(rx_int) == -EPROBE_DEFER)
index a0336e895d941163acf525ac7dc2bf7c6fd00860,018ca3b057a3baeb59d460385b464c5faa7b7747..0487095e1fd0468b205ae09f113f84b292d0d91e
@@@ -9,7 -9,6 +9,7 @@@
   * Many thanks to all socketcan devs!
   */
  
 +#include <linux/ethtool.h>
  #include <linux/init.h>
  #include <linux/signal.h>
  #include <linux/module.h>
@@@ -64,21 -63,27 +64,27 @@@ enum gs_can_identify_mode 
  };
  
  /* data types passed between host and device */
+ /* The firmware on the original USB2CAN by Geschwister Schneider
+  * Technologie Entwicklungs- und Vertriebs UG exchanges all data
+  * between the host and the device in host byte order. This is done
+  * with the struct gs_host_config::byte_order member, which is sent
+  * first to indicate the desired byte order.
+  *
+  * The widely used open source firmware candleLight doesn't support
+  * this feature and exchanges the data in little endian byte order.
+  */
  struct gs_host_config {
-       u32 byte_order;
+       __le32 byte_order;
  } __packed;
- /* All data exchanged between host and device is exchanged in host byte order,
-  * thanks to the struct gs_host_config byte_order member, which is sent first
-  * to indicate the desired byte order.
-  */
  
  struct gs_device_config {
        u8 reserved1;
        u8 reserved2;
        u8 reserved3;
        u8 icount;
-       u32 sw_version;
-       u32 hw_version;
+       __le32 sw_version;
+       __le32 hw_version;
  } __packed;
  
  #define GS_CAN_MODE_NORMAL               0
  #define GS_CAN_MODE_ONE_SHOT             BIT(3)
  
  struct gs_device_mode {
-       u32 mode;
-       u32 flags;
+       __le32 mode;
+       __le32 flags;
  } __packed;
  
  struct gs_device_state {
-       u32 state;
-       u32 rxerr;
-       u32 txerr;
+       __le32 state;
+       __le32 rxerr;
+       __le32 txerr;
  } __packed;
  
  struct gs_device_bittiming {
-       u32 prop_seg;
-       u32 phase_seg1;
-       u32 phase_seg2;
-       u32 sjw;
-       u32 brp;
+       __le32 prop_seg;
+       __le32 phase_seg1;
+       __le32 phase_seg2;
+       __le32 sjw;
+       __le32 brp;
  } __packed;
  
  struct gs_identify_mode {
-       u32 mode;
+       __le32 mode;
  } __packed;
  
  #define GS_CAN_FEATURE_LISTEN_ONLY      BIT(0)
  #define GS_CAN_FEATURE_IDENTIFY         BIT(5)
  
  struct gs_device_bt_const {
-       u32 feature;
-       u32 fclk_can;
-       u32 tseg1_min;
-       u32 tseg1_max;
-       u32 tseg2_min;
-       u32 tseg2_max;
-       u32 sjw_max;
-       u32 brp_min;
-       u32 brp_max;
-       u32 brp_inc;
+       __le32 feature;
+       __le32 fclk_can;
+       __le32 tseg1_min;
+       __le32 tseg1_max;
+       __le32 tseg2_min;
+       __le32 tseg2_max;
+       __le32 sjw_max;
+       __le32 brp_min;
+       __le32 brp_max;
+       __le32 brp_inc;
  } __packed;
  
  #define GS_CAN_FLAG_OVERFLOW 1
  
  struct gs_host_frame {
        u32 echo_id;
-       u32 can_id;
+       __le32 can_id;
  
        u8 can_dlc;
        u8 channel;
@@@ -330,13 -335,13 +336,13 @@@ static void gs_usb_receive_bulk_callbac
                if (!skb)
                        return;
  
-               cf->can_id = hf->can_id;
+               cf->can_id = le32_to_cpu(hf->can_id);
  
 -              cf->can_dlc = get_can_dlc(hf->can_dlc);
 +              can_frame_set_cc_len(cf, hf->can_dlc, dev->can.ctrlmode);
                memcpy(cf->data, hf->data, 8);
  
                /* ERROR frames tell us information about the controller */
-               if (hf->can_id & CAN_ERR_FLAG)
+               if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
                        gs_update_state(dev, cf);
  
                netdev->stats.rx_packets++;
                        goto resubmit_urb;
  
                cf->can_id |= CAN_ERR_CRTL;
 -              cf->can_dlc = CAN_ERR_DLC;
 +              cf->len = CAN_ERR_DLC;
                cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
                stats->rx_over_errors++;
                stats->rx_errors++;
@@@ -419,11 -424,11 +425,11 @@@ static int gs_usb_set_bittiming(struct 
        if (!dbt)
                return -ENOMEM;
  
-       dbt->prop_seg = bt->prop_seg;
-       dbt->phase_seg1 = bt->phase_seg1;
-       dbt->phase_seg2 = bt->phase_seg2;
-       dbt->sjw = bt->sjw;
-       dbt->brp = bt->brp;
+       dbt->prop_seg = cpu_to_le32(bt->prop_seg);
+       dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
+       dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
+       dbt->sjw = cpu_to_le32(bt->sjw);
+       dbt->brp = cpu_to_le32(bt->brp);
  
        /* request bit timings */
        rc = usb_control_msg(interface_to_usbdev(intf),
@@@ -504,10 -509,9 +510,10 @@@ static netdev_tx_t gs_can_start_xmit(st
  
        cf = (struct can_frame *)skb->data;
  
-       hf->can_id = cf->can_id;
+       hf->can_id = cpu_to_le32(cf->can_id);
 -      hf->can_dlc = cf->can_dlc;
 -      memcpy(hf->data, cf->data, cf->can_dlc);
 +      hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);
 +
 +      memcpy(hf->data, cf->data, cf->len);
  
        usb_fill_bulk_urb(urb, dev->udev,
                          usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
@@@ -575,6 -579,7 +581,7 @@@ static int gs_can_open(struct net_devic
        int rc, i;
        struct gs_device_mode *dm;
        u32 ctrlmode;
+       u32 flags = 0;
  
        rc = open_candev(netdev);
        if (rc)
  
        /* flags */
        ctrlmode = dev->can.ctrlmode;
-       dm->flags = 0;
  
        if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
-               dm->flags |= GS_CAN_MODE_LOOP_BACK;
+               flags |= GS_CAN_MODE_LOOP_BACK;
        else if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
-               dm->flags |= GS_CAN_MODE_LISTEN_ONLY;
+               flags |= GS_CAN_MODE_LISTEN_ONLY;
  
        /* Controller is not allowed to retry TX
         * this mode is unavailable on atmels uc3c hardware
         */
        if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
-               dm->flags |= GS_CAN_MODE_ONE_SHOT;
+               flags |= GS_CAN_MODE_ONE_SHOT;
  
        if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
-               dm->flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+               flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
  
        /* finally start device */
-       dm->mode = GS_CAN_MODE_START;
+       dm->mode = cpu_to_le32(GS_CAN_MODE_START);
+       dm->flags = cpu_to_le32(flags);
        rc = usb_control_msg(interface_to_usbdev(dev->iface),
                             usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
                             GS_USB_BREQ_MODE,
@@@ -739,9 -744,9 +746,9 @@@ static int gs_usb_set_identify(struct n
                return -ENOMEM;
  
        if (do_identify)
-               imode->mode = GS_CAN_IDENTIFY_ON;
+               imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
        else
-               imode->mode = GS_CAN_IDENTIFY_OFF;
+               imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
  
        rc = usb_control_msg(interface_to_usbdev(dev->iface),
                             usb_sndctrlpipe(interface_to_usbdev(dev->iface),
@@@ -792,6 -797,7 +799,7 @@@ static struct gs_can *gs_make_candev(un
        struct net_device *netdev;
        int rc;
        struct gs_device_bt_const *bt_const;
+       u32 feature;
  
        bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
        if (!bt_const)
  
        /* dev setup */
        strcpy(dev->bt_const.name, "gs_usb");
-       dev->bt_const.tseg1_min = bt_const->tseg1_min;
-       dev->bt_const.tseg1_max = bt_const->tseg1_max;
-       dev->bt_const.tseg2_min = bt_const->tseg2_min;
-       dev->bt_const.tseg2_max = bt_const->tseg2_max;
-       dev->bt_const.sjw_max = bt_const->sjw_max;
-       dev->bt_const.brp_min = bt_const->brp_min;
-       dev->bt_const.brp_max = bt_const->brp_max;
-       dev->bt_const.brp_inc = bt_const->brp_inc;
+       dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
+       dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
+       dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
+       dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
+       dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
+       dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
+       dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
+       dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
  
        dev->udev = interface_to_usbdev(intf);
        dev->iface = intf;
  
        /* can setup */
        dev->can.state = CAN_STATE_STOPPED;
-       dev->can.clock.freq = bt_const->fclk_can;
+       dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
        dev->can.bittiming_const = &dev->bt_const;
        dev->can.do_set_bittiming = gs_usb_set_bittiming;
  
 -      dev->can.ctrlmode_supported = 0;
 +      dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
  
-       if (bt_const->feature & GS_CAN_FEATURE_LISTEN_ONLY)
+       feature = le32_to_cpu(bt_const->feature);
+       if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
                dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
  
-       if (bt_const->feature & GS_CAN_FEATURE_LOOP_BACK)
+       if (feature & GS_CAN_FEATURE_LOOP_BACK)
                dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;
  
-       if (bt_const->feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
+       if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
                dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
  
-       if (bt_const->feature & GS_CAN_FEATURE_ONE_SHOT)
+       if (feature & GS_CAN_FEATURE_ONE_SHOT)
                dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;
  
        SET_NETDEV_DEV(netdev, &intf->dev);
  
-       if (dconf->sw_version > 1)
-               if (bt_const->feature & GS_CAN_FEATURE_IDENTIFY)
+       if (le32_to_cpu(dconf->sw_version) > 1)
+               if (feature & GS_CAN_FEATURE_IDENTIFY)
                        netdev->ethtool_ops = &gs_usb_ethtool_ops;
  
        kfree(bt_const);
@@@ -912,7 -919,7 +921,7 @@@ static int gs_usb_probe(struct usb_inte
        if (!hconf)
                return -ENOMEM;
  
-       hconf->byte_order = 0x0000beef;
+       hconf->byte_order = cpu_to_le32(0x0000beef);
  
        /* send host config */
        rc = usb_control_msg(interface_to_usbdev(intf),
index aeb21dc4809991270ac92caf01c05c0f2ce6f850,dbceb99c4441adbccafba6a6756b63c6b4c4aeb9..a9aee219fb581d6cb68808369f56b915b39ec285
@@@ -92,18 -92,8 +92,8 @@@ static int enetc_setup_taprio(struct ne
        gcl_config->atc = 0xff;
        gcl_config->acl_len = cpu_to_le16(gcl_len);
  
-       if (!admin_conf->base_time) {
-               gcl_data->btl =
-                       cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR0));
-               gcl_data->bth =
-                       cpu_to_le32(enetc_rd(&priv->si->hw, ENETC_SICTR1));
-       } else {
-               gcl_data->btl =
-                       cpu_to_le32(lower_32_bits(admin_conf->base_time));
-               gcl_data->bth =
-                       cpu_to_le32(upper_32_bits(admin_conf->base_time));
-       }
+       gcl_data->btl = cpu_to_le32(lower_32_bits(admin_conf->base_time));
+       gcl_data->bth = cpu_to_le32(upper_32_bits(admin_conf->base_time));
        gcl_data->ct = cpu_to_le32(admin_conf->cycle_time);
        gcl_data->cte = cpu_to_le32(admin_conf->cycle_time_extension);
  
                return -ENOMEM;
        }
  
 -      cbd.addr[0] = lower_32_bits(dma);
 -      cbd.addr[1] = upper_32_bits(dma);
 +      cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
 +      cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
        cbd.cls = BDCR_CMD_PORT_GCL;
        cbd.status_flags = 0;
  
@@@ -506,15 -496,16 +496,15 @@@ static int enetc_streamid_hw_set(struc
                return -ENOMEM;
        }
  
 -      cbd.addr[0] = lower_32_bits(dma);
 -      cbd.addr[1] = upper_32_bits(dma);
 +      cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
 +      cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
        eth_broadcast_addr(si_data->dmac);
 -      si_data->vid_vidm_tg =
 -              cpu_to_le16(ENETC_CBDR_SID_VID_MASK
 -                          + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
 +      si_data->vid_vidm_tg = (ENETC_CBDR_SID_VID_MASK
 +                             + ((0x3 << 14) | ENETC_CBDR_SID_VIDM));
  
        si_conf = &cbd.sid_set;
        /* Only one port supported for one entry, set itself */
 -      si_conf->iports = 1 << enetc_get_port(priv);
 +      si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
        si_conf->id_type = 1;
        si_conf->oui[2] = 0x0;
        si_conf->oui[1] = 0x80;
  
        si_conf->en = 0x80;
        si_conf->stream_handle = cpu_to_le32(sid->handle);
 -      si_conf->iports = 1 << enetc_get_port(priv);
 +      si_conf->iports = cpu_to_le32(1 << enetc_get_port(priv));
        si_conf->id_type = sid->filtertype;
        si_conf->oui[2] = 0x0;
        si_conf->oui[1] = 0x80;
  
        cbd.length = cpu_to_le16(data_size);
  
 -      cbd.addr[0] = lower_32_bits(dma);
 -      cbd.addr[1] = upper_32_bits(dma);
 +      cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
 +      cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
  
        /* VIDM default to be 1.
         * VID Match. If set (b1) then the VID must match, otherwise
         */
        if (si_conf->id_type == STREAMID_TYPE_NULL) {
                ether_addr_copy(si_data->dmac, sid->dst_mac);
 -              si_data->vid_vidm_tg =
 -              cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
 -                          ((((u16)(sid->tagged) & 0x3) << 14)
 -                           | ENETC_CBDR_SID_VIDM));
 +              si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
 +                                     ((((u16)(sid->tagged) & 0x3) << 14)
 +                                     | ENETC_CBDR_SID_VIDM);
        } else if (si_conf->id_type == STREAMID_TYPE_SMAC) {
                ether_addr_copy(si_data->smac, sid->src_mac);
 -              si_data->vid_vidm_tg =
 -              cpu_to_le16((sid->vid & ENETC_CBDR_SID_VID_MASK) +
 -                          ((((u16)(sid->tagged) & 0x3) << 14)
 -                           | ENETC_CBDR_SID_VIDM));
 +              si_data->vid_vidm_tg = (sid->vid & ENETC_CBDR_SID_VID_MASK) +
 +                                     ((((u16)(sid->tagged) & 0x3) << 14)
 +                                     | ENETC_CBDR_SID_VIDM);
        }
  
        err = enetc_send_cmd(priv->si, &cbd);
@@@ -601,7 -594,7 +591,7 @@@ static int enetc_streamfilter_hw_set(st
        }
  
        sfi_config->sg_inst_table_index = cpu_to_le16(sfi->gate_id);
 -      sfi_config->input_ports = 1 << enetc_get_port(priv);
 +      sfi_config->input_ports = cpu_to_le32(1 << enetc_get_port(priv));
  
        /* The priority value which may be matched against the
         * frame's priority value to determine a match for this entry.
@@@ -655,8 -648,8 +645,8 @@@ static int enetc_streamcounter_hw_get(s
                err = -ENOMEM;
                goto exit;
        }
 -      cbd.addr[0] = lower_32_bits(dma);
 -      cbd.addr[1] = upper_32_bits(dma);
 +      cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
 +      cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
  
        cbd.length = cpu_to_le16(data_size);
  
        if (err)
                goto exit;
  
 -      cnt->matching_frames_count =
 -                      ((u64)le32_to_cpu(data_buf->matchh) << 32)
 -                      + data_buf->matchl;
 +      cnt->matching_frames_count = ((u64)data_buf->matchh << 32) +
 +                                   data_buf->matchl;
  
 -      cnt->not_passing_sdu_count =
 -                      ((u64)le32_to_cpu(data_buf->msdu_droph) << 32)
 -                      + data_buf->msdu_dropl;
 +      cnt->not_passing_sdu_count = ((u64)data_buf->msdu_droph << 32) +
 +                                   data_buf->msdu_dropl;
  
        cnt->passing_sdu_count = cnt->matching_frames_count
                                - cnt->not_passing_sdu_count;
  
        cnt->not_passing_frames_count =
 -              ((u64)le32_to_cpu(data_buf->stream_gate_droph) << 32)
 -              + le32_to_cpu(data_buf->stream_gate_dropl);
 +                              ((u64)data_buf->stream_gate_droph << 32) +
 +                              data_buf->stream_gate_dropl;
  
 -      cnt->passing_frames_count = cnt->matching_frames_count
 -                              - cnt->not_passing_sdu_count
 -                              - cnt->not_passing_frames_count;
 +      cnt->passing_frames_count = cnt->matching_frames_count -
 +                                  cnt->not_passing_sdu_count -
 +                                  cnt->not_passing_frames_count;
  
 -      cnt->red_frames_count =
 -              ((u64)le32_to_cpu(data_buf->flow_meter_droph) << 32)
 -              + le32_to_cpu(data_buf->flow_meter_dropl);
 +      cnt->red_frames_count = ((u64)data_buf->flow_meter_droph << 32) +
 +                              data_buf->flow_meter_dropl;
  
  exit:
        kfree(data_buf);
@@@ -789,15 -785,15 +779,15 @@@ static int enetc_streamgate_hw_set(stru
                return -ENOMEM;
        }
  
 -      cbd.addr[0] = lower_32_bits(dma);
 -      cbd.addr[1] = upper_32_bits(dma);
 +      cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
 +      cbd.addr[1] = cpu_to_le32(upper_32_bits(dma));
  
        sgce = &sgcl_data->sgcl[0];
  
        sgcl_config->agtst = 0x80;
  
 -      sgcl_data->ct = cpu_to_le32(sgi->cycletime);
 -      sgcl_data->cte = cpu_to_le32(sgi->cycletimext);
 +      sgcl_data->ct = sgi->cycletime;
 +      sgcl_data->cte = sgi->cycletimext;
  
        if (sgi->init_ipv >= 0)
                sgcl_config->aipv = (sgi->init_ipv & 0x7) | 0x8;
                        to->msdu[2] = (from->maxoctets >> 16) & 0xFF;
                }
  
 -              to->interval = cpu_to_le32(from->interval);
 +              to->interval = from->interval;
        }
  
        /* If basetime is less than now, calculate start time */
                err = get_start_ns(now, sgi->cycletime, &start);
                if (err)
                        goto exit;
 -              sgcl_data->btl = cpu_to_le32(lower_32_bits(start));
 -              sgcl_data->bth = cpu_to_le32(upper_32_bits(start));
 +              sgcl_data->btl = lower_32_bits(start);
 +              sgcl_data->bth = upper_32_bits(start);
        } else {
                u32 hi, lo;
  
                hi = upper_32_bits(sgi->basetime);
                lo = lower_32_bits(sgi->basetime);
 -              sgcl_data->bth = cpu_to_le32(hi);
 -              sgcl_data->btl = cpu_to_le32(lo);
 +              sgcl_data->bth = hi;
 +              sgcl_data->btl = lo;
        }
  
        err = enetc_send_cmd(priv->si, &cbd);
index 9fe43ab0496db04048eaaae03fef3ff4b73d4f9e,2491ebc978716d78149f9839c437dc6e6d5ba57c..cdd1ff9aa9c45c9aef454c2d7700de31f7fe8b64
@@@ -84,6 -84,8 +84,6 @@@ static int ibmvnic_reset_crq(struct ibm
  static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
  static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
  static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
 -static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
 -                     union sub_crq *sub_crq);
  static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
  static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
  static int enable_scrq_irq(struct ibmvnic_adapter *,
@@@ -304,11 -306,9 +304,11 @@@ static void replenish_rx_pool(struct ib
        int count = pool->size - atomic_read(&pool->available);
        u64 handle = adapter->rx_scrq[pool->index]->handle;
        struct device *dev = &adapter->vdev->dev;
 +      struct ibmvnic_ind_xmit_queue *ind_bufp;
 +      struct ibmvnic_sub_crq_queue *rx_scrq;
 +      union sub_crq *sub_crq;
        int buffers_added = 0;
        unsigned long lpar_rc;
 -      union sub_crq sub_crq;
        struct sk_buff *skb;
        unsigned int offset;
        dma_addr_t dma_addr;
        if (!pool->active)
                return;
  
 +      rx_scrq = adapter->rx_scrq[pool->index];
 +      ind_bufp = &rx_scrq->ind_buf;
        for (i = 0; i < count; ++i) {
 -              skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
 +              skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
                if (!skb) {
                        dev_err(dev, "Couldn't replenish rx buff\n");
                        adapter->replenish_no_mem++;
                pool->rx_buff[index].pool_index = pool->index;
                pool->rx_buff[index].size = pool->buff_size;
  
 -              memset(&sub_crq, 0, sizeof(sub_crq));
 -              sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
 -              sub_crq.rx_add.correlator =
 +              sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
 +              memset(sub_crq, 0, sizeof(*sub_crq));
 +              sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
 +              sub_crq->rx_add.correlator =
                    cpu_to_be64((u64)&pool->rx_buff[index]);
 -              sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
 -              sub_crq.rx_add.map_id = pool->long_term_buff.map_id;
 +              sub_crq->rx_add.ioba = cpu_to_be32(dma_addr);
 +              sub_crq->rx_add.map_id = pool->long_term_buff.map_id;
  
                /* The length field of the sCRQ is defined to be 24 bits so the
                 * buffer size needs to be left shifted by a byte before it is
  #ifdef __LITTLE_ENDIAN__
                shift = 8;
  #endif
 -              sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);
 -
 -              lpar_rc = send_subcrq(adapter, handle, &sub_crq);
 -              if (lpar_rc != H_SUCCESS)
 -                      goto failure;
 -
 -              buffers_added++;
 -              adapter->replenish_add_buff_success++;
 +              sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
                pool->next_free = (pool->next_free + 1) % pool->size;
 +              if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
 +                  i == count - 1) {
 +                      lpar_rc =
 +                              send_subcrq_indirect(adapter, handle,
 +                                                   (u64)ind_bufp->indir_dma,
 +                                                   (u64)ind_bufp->index);
 +                      if (lpar_rc != H_SUCCESS)
 +                              goto failure;
 +                      buffers_added += ind_bufp->index;
 +                      adapter->replenish_add_buff_success += ind_bufp->index;
 +                      ind_bufp->index = 0;
 +              }
        }
        atomic_add(buffers_added, &pool->available);
        return;
  failure:
        if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
                dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
 -      pool->free_map[pool->next_free] = index;
 -      pool->rx_buff[index].skb = NULL;
 -
 -      dev_kfree_skb_any(skb);
 -      adapter->replenish_add_buff_failure++;
 -      atomic_add(buffers_added, &pool->available);
 +      for (i = ind_bufp->index - 1; i >= 0; --i) {
 +              struct ibmvnic_rx_buff *rx_buff;
  
 +              pool->next_free = pool->next_free == 0 ?
 +                                pool->size - 1 : pool->next_free - 1;
 +              sub_crq = &ind_bufp->indir_arr[i];
 +              rx_buff = (struct ibmvnic_rx_buff *)
 +                              be64_to_cpu(sub_crq->rx_add.correlator);
 +              index = (int)(rx_buff - pool->rx_buff);
 +              pool->free_map[pool->next_free] = index;
 +              dev_kfree_skb_any(pool->rx_buff[index].skb);
 +              pool->rx_buff[index].skb = NULL;
 +      }
 +      ind_bufp->index = 0;
        if (lpar_rc == H_CLOSED || adapter->failover_pending) {
                /* Disable buffer pool replenishment and report carrier off if
                 * queue is closed or pending failover.
@@@ -498,7 -483,7 +498,7 @@@ static int reset_rx_pools(struct ibmvni
  
                if (rx_pool->buff_size != buff_size) {
                        free_long_term_buff(adapter, &rx_pool->long_term_buff);
 -                      rx_pool->buff_size = buff_size;
 +                      rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
                        rc = alloc_long_term_buff(adapter,
                                                  &rx_pool->long_term_buff,
                                                  rx_pool->size *
@@@ -592,7 -577,7 +592,7 @@@ static int init_rx_pools(struct net_dev
  
                rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
                rx_pool->index = i;
 -              rx_pool->buff_size = buff_size;
 +              rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
                rx_pool->active = 1;
  
                rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
@@@ -745,7 -730,6 +745,7 @@@ static int init_tx_pools(struct net_dev
  {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int tx_subcrqs;
 +      u64 buff_size;
        int i, rc;
  
        tx_subcrqs = adapter->num_active_tx_scrqs;
        adapter->num_active_tx_pools = tx_subcrqs;
  
        for (i = 0; i < tx_subcrqs; i++) {
 +              buff_size = adapter->req_mtu + VLAN_HLEN;
 +              buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
                rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
                                      adapter->req_tx_entries_per_subcrq,
 -                                    adapter->req_mtu + VLAN_HLEN);
 +                                    buff_size);
                if (rc) {
                        release_tx_pools(adapter);
                        return rc;
@@@ -1166,7 -1148,6 +1166,7 @@@ static int __ibmvnic_open(struct net_de
                if (prev_state == VNIC_CLOSED)
                        enable_irq(adapter->tx_scrq[i]->irq);
                enable_scrq_irq(adapter, adapter->tx_scrq[i]);
 +              netdev_tx_reset_queue(netdev_get_tx_queue(netdev, i));
        }
  
        rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_UP);
@@@ -1497,18 -1478,17 +1497,18 @@@ static int create_hdr_descs(u8 hdr_fiel
   * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
   */
  
 -static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
 +static void build_hdr_descs_arr(struct sk_buff *skb,
 +                              union sub_crq *indir_arr,
                                int *num_entries, u8 hdr_field)
  {
        int hdr_len[3] = {0, 0, 0};
 +      u8 hdr_data[140] = {0};
        int tot_len;
 -      u8 *hdr_data = txbuff->hdr_data;
  
 -      tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
 -                               txbuff->hdr_data);
 +      tot_len = build_hdr_data(hdr_field, skb, hdr_len,
 +                               hdr_data);
        *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
 -                       txbuff->indir_arr + 1);
 +                                       indir_arr + 1);
  }
  
  static int ibmvnic_xmit_workarounds(struct sk_buff *skb,
        return 0;
  }
  
 +static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
 +                                       struct ibmvnic_sub_crq_queue *tx_scrq)
 +{
 +      struct ibmvnic_ind_xmit_queue *ind_bufp;
 +      struct ibmvnic_tx_buff *tx_buff;
 +      struct ibmvnic_tx_pool *tx_pool;
 +      union sub_crq tx_scrq_entry;
 +      int queue_num;
 +      int entries;
 +      int index;
 +      int i;
 +
 +      ind_bufp = &tx_scrq->ind_buf;
 +      entries = (u64)ind_bufp->index;
 +      queue_num = tx_scrq->pool_index;
 +
 +      for (i = entries - 1; i >= 0; --i) {
 +              tx_scrq_entry = ind_bufp->indir_arr[i];
 +              if (tx_scrq_entry.v1.type != IBMVNIC_TX_DESC)
 +                      continue;
 +              index = be32_to_cpu(tx_scrq_entry.v1.correlator);
 +              if (index & IBMVNIC_TSO_POOL_MASK) {
 +                      tx_pool = &adapter->tso_pool[queue_num];
 +                      index &= ~IBMVNIC_TSO_POOL_MASK;
 +              } else {
 +                      tx_pool = &adapter->tx_pool[queue_num];
 +              }
 +              tx_pool->free_map[tx_pool->consumer_index] = index;
 +              tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
 +                                        tx_pool->num_buffers - 1 :
 +                                        tx_pool->consumer_index - 1;
 +              tx_buff = &tx_pool->tx_buff[index];
 +              adapter->netdev->stats.tx_packets--;
 +              adapter->netdev->stats.tx_bytes -= tx_buff->skb->len;
 +              adapter->tx_stats_buffers[queue_num].packets--;
 +              adapter->tx_stats_buffers[queue_num].bytes -=
 +                                              tx_buff->skb->len;
 +              dev_kfree_skb_any(tx_buff->skb);
 +              tx_buff->skb = NULL;
 +              adapter->netdev->stats.tx_dropped++;
 +      }
 +      ind_bufp->index = 0;
 +      if (atomic_sub_return(entries, &tx_scrq->used) <=
 +          (adapter->req_tx_entries_per_subcrq / 2) &&
 +          __netif_subqueue_stopped(adapter->netdev, queue_num)) {
 +              netif_wake_subqueue(adapter->netdev, queue_num);
 +              netdev_dbg(adapter->netdev, "Started queue %d\n",
 +                         queue_num);
 +      }
 +}
 +
 +static int ibmvnic_tx_scrq_flush(struct ibmvnic_adapter *adapter,
 +                               struct ibmvnic_sub_crq_queue *tx_scrq)
 +{
 +      struct ibmvnic_ind_xmit_queue *ind_bufp;
 +      u64 dma_addr;
 +      u64 entries;
 +      u64 handle;
 +      int rc;
 +
 +      ind_bufp = &tx_scrq->ind_buf;
 +      dma_addr = (u64)ind_bufp->indir_dma;
 +      entries = (u64)ind_bufp->index;
 +      handle = tx_scrq->handle;
 +
 +      if (!entries)
 +              return 0;
 +      rc = send_subcrq_indirect(adapter, handle, dma_addr, entries);
 +      if (rc)
 +              ibmvnic_tx_scrq_clean_buffer(adapter, tx_scrq);
 +      else
 +              ind_bufp->index = 0;
 +      return 0;
 +}
 +
  static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
  {
        struct ibmvnic_adapter *adapter = netdev_priv(netdev);
        int queue_num = skb_get_queue_mapping(skb);
        u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
        struct device *dev = &adapter->vdev->dev;
 +      struct ibmvnic_ind_xmit_queue *ind_bufp;
        struct ibmvnic_tx_buff *tx_buff = NULL;
        struct ibmvnic_sub_crq_queue *tx_scrq;
        struct ibmvnic_tx_pool *tx_pool;
        unsigned int tx_send_failed = 0;
 +      netdev_tx_t ret = NETDEV_TX_OK;
        unsigned int tx_map_failed = 0;
 +      union sub_crq indir_arr[16];
        unsigned int tx_dropped = 0;
        unsigned int tx_packets = 0;
        unsigned int tx_bytes = 0;
        unsigned char *dst;
        int index = 0;
        u8 proto = 0;
 -      u64 handle;
 -      netdev_tx_t ret = NETDEV_TX_OK;
 +
 +      tx_scrq = adapter->tx_scrq[queue_num];
 +      txq = netdev_get_tx_queue(netdev, queue_num);
 +      ind_bufp = &tx_scrq->ind_buf;
  
        if (test_bit(0, &adapter->resetting)) {
                if (!netif_subqueue_stopped(netdev, skb))
                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
 +              ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                goto out;
        }
  
                tx_dropped++;
                tx_send_failed++;
                ret = NETDEV_TX_OK;
 +              ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                goto out;
        }
        if (skb_is_gso(skb))
        else
                tx_pool = &adapter->tx_pool[queue_num];
  
 -      tx_scrq = adapter->tx_scrq[queue_num];
 -      txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
 -      handle = tx_scrq->handle;
 -
        index = tx_pool->free_map[tx_pool->consumer_index];
  
        if (index == IBMVNIC_INVALID_MAP) {
                tx_send_failed++;
                tx_dropped++;
                ret = NETDEV_TX_OK;
 +              ibmvnic_tx_scrq_flush(adapter, tx_scrq);
                goto out;
        }
  
  
        tx_buff = &tx_pool->tx_buff[index];
        tx_buff->skb = skb;
 -      tx_buff->data_dma[0] = data_dma_addr;
 -      tx_buff->data_len[0] = skb->len;
        tx_buff->index = index;
        tx_buff->pool_index = queue_num;
 -      tx_buff->last_frag = true;
  
        memset(&tx_crq, 0, sizeof(tx_crq));
        tx_crq.v1.first = IBMVNIC_CRQ_CMD;
                tx_crq.v1.mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
                hdrs += 2;
        }
 -      /* determine if l2/3/4 headers are sent to firmware */
 -      if ((*hdrs >> 7) & 1) {
 -              build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
 -              tx_crq.v1.n_crq_elem = num_entries;
 -              tx_buff->num_entries = num_entries;
 -              tx_buff->indir_arr[0] = tx_crq;
 -              tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
 -                                                  sizeof(tx_buff->indir_arr),
 -                                                  DMA_TO_DEVICE);
 -              if (dma_mapping_error(dev, tx_buff->indir_dma)) {
 -                      dev_kfree_skb_any(skb);
 -                      tx_buff->skb = NULL;
 -                      if (!firmware_has_feature(FW_FEATURE_CMO))
 -                              dev_err(dev, "tx: unable to map descriptor array\n");
 -                      tx_map_failed++;
 -                      tx_dropped++;
 -                      ret = NETDEV_TX_OK;
 -                      goto tx_err_out;
 -              }
 -              lpar_rc = send_subcrq_indirect(adapter, handle,
 -                                             (u64)tx_buff->indir_dma,
 -                                             (u64)num_entries);
 -              dma_unmap_single(dev, tx_buff->indir_dma,
 -                               sizeof(tx_buff->indir_arr), DMA_TO_DEVICE);
 -      } else {
 -              tx_buff->num_entries = num_entries;
 -              lpar_rc = send_subcrq(adapter, handle,
 -                                    &tx_crq);
 -      }
 -      if (lpar_rc != H_SUCCESS) {
 -              if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
 -                      dev_err_ratelimited(dev, "tx: send failed\n");
 -              dev_kfree_skb_any(skb);
 -              tx_buff->skb = NULL;
  
 -              if (lpar_rc == H_CLOSED || adapter->failover_pending) {
 -                      /* Disable TX and report carrier off if queue is closed
 -                       * or pending failover.
 -                       * Firmware guarantees that a signal will be sent to the
 -                       * driver, triggering a reset or some other action.
 -                       */
 -                      netif_tx_stop_all_queues(netdev);
 -                      netif_carrier_off(netdev);
 -              }
 +      if ((*hdrs >> 7) & 1)
 +              build_hdr_descs_arr(skb, indir_arr, &num_entries, *hdrs);
  
 -              tx_send_failed++;
 -              tx_dropped++;
 -              ret = NETDEV_TX_OK;
 -              goto tx_err_out;
 +      tx_crq.v1.n_crq_elem = num_entries;
 +      tx_buff->num_entries = num_entries;
 +      /* flush buffer if current entry can not fit */
 +      if (num_entries + ind_bufp->index > IBMVNIC_MAX_IND_DESCS) {
 +              lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
 +              if (lpar_rc != H_SUCCESS)
 +                      goto tx_flush_err;
 +      }
 +
 +      indir_arr[0] = tx_crq;
 +      memcpy(&ind_bufp->indir_arr[ind_bufp->index], &indir_arr[0],
 +             num_entries * sizeof(struct ibmvnic_generic_scrq));
 +      ind_bufp->index += num_entries;
 +      if (__netdev_tx_sent_queue(txq, skb->len,
 +                                 netdev_xmit_more() &&
 +                                 ind_bufp->index < IBMVNIC_MAX_IND_DESCS)) {
 +              lpar_rc = ibmvnic_tx_scrq_flush(adapter, tx_scrq);
 +              if (lpar_rc != H_SUCCESS)
 +                      goto tx_err;
        }
  
        if (atomic_add_return(num_entries, &tx_scrq->used)
        ret = NETDEV_TX_OK;
        goto out;
  
 -tx_err_out:
 -      /* roll back consumer index and map array*/
 -      if (tx_pool->consumer_index == 0)
 -              tx_pool->consumer_index =
 -                      tx_pool->num_buffers - 1;
 -      else
 -              tx_pool->consumer_index--;
 -      tx_pool->free_map[tx_pool->consumer_index] = index;
 +tx_flush_err:
 +      dev_kfree_skb_any(skb);
 +      tx_buff->skb = NULL;
 +      tx_pool->consumer_index = tx_pool->consumer_index == 0 ?
 +                                tx_pool->num_buffers - 1 :
 +                                tx_pool->consumer_index - 1;
 +      tx_dropped++;
 +tx_err:
 +      if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
 +              dev_err_ratelimited(dev, "tx: send failed\n");
 +
 +      if (lpar_rc == H_CLOSED || adapter->failover_pending) {
 +              /* Disable TX and report carrier off if queue is closed
 +               * or pending failover.
 +               * Firmware guarantees that a signal will be sent to the
 +               * driver, triggering a reset or some other action.
 +               */
 +              netif_tx_stop_all_queues(netdev);
 +              netif_carrier_off(netdev);
 +      }
  out:
        netdev->stats.tx_dropped += tx_dropped;
        netdev->stats.tx_bytes += tx_bytes;
@@@ -2156,8 -2074,11 +2156,11 @@@ static int do_reset(struct ibmvnic_adap
        for (i = 0; i < adapter->req_rx_queues; i++)
                napi_schedule(&adapter->napi[i]);
  
-       if (adapter->reset_reason != VNIC_RESET_FAILOVER)
+       if (adapter->reset_reason == VNIC_RESET_FAILOVER ||
+           adapter->reset_reason == VNIC_RESET_MOBILITY) {
                call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
+               call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
+       }
  
        rc = 0;
  
@@@ -2227,6 -2148,9 +2230,9 @@@ static int do_hard_reset(struct ibmvnic
        if (rc)
                return IBMVNIC_OPEN_FAILED;
  
+       call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, netdev);
+       call_netdevice_notifiers(NETDEV_RESEND_IGMP, netdev);
        return 0;
  }
  
@@@ -2291,7 -2215,6 +2297,6 @@@ static void __ibmvnic_reset(struct work
  
                if (!saved_state) {
                        reset_state = adapter->state;
-                       adapter->state = VNIC_RESETTING;
                        saved_state = true;
                }
                spin_unlock_irqrestore(&adapter->state_lock, flags);
@@@ -2432,6 -2355,12 +2437,12 @@@ static void ibmvnic_tx_timeout(struct n
  {
        struct ibmvnic_adapter *adapter = netdev_priv(dev);
  
+       if (test_bit(0, &adapter->resetting)) {
+               netdev_err(adapter->netdev,
+                          "Adapter is resetting, skip timeout reset\n");
+               return;
+       }
        ibmvnic_reset(adapter, VNIC_RESET_TIMEOUT);
  }
  
@@@ -2450,17 -2379,10 +2461,17 @@@ static void remove_buff_from_pool(struc
  
  static int ibmvnic_poll(struct napi_struct *napi, int budget)
  {
 -      struct net_device *netdev = napi->dev;
 -      struct ibmvnic_adapter *adapter = netdev_priv(netdev);
 -      int scrq_num = (int)(napi - adapter->napi);
 -      int frames_processed = 0;
 +      struct ibmvnic_sub_crq_queue *rx_scrq;
 +      struct ibmvnic_adapter *adapter;
 +      struct net_device *netdev;
 +      int frames_processed;
 +      int scrq_num;
 +
 +      netdev = napi->dev;
 +      adapter = netdev_priv(netdev);
 +      scrq_num = (int)(napi - adapter->napi);
 +      frames_processed = 0;
 +      rx_scrq = adapter->rx_scrq[scrq_num];
  
  restart_poll:
        while (frames_processed < budget) {
  
                if (unlikely(test_bit(0, &adapter->resetting) &&
                             adapter->reset_reason != VNIC_RESET_NON_FATAL)) {
 -                      enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
 +                      enable_scrq_irq(adapter, rx_scrq);
                        napi_complete_done(napi, frames_processed);
                        return frames_processed;
                }
  
 -              if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
 +              if (!pending_scrq(adapter, rx_scrq))
                        break;
 -              next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
 +              next = ibmvnic_next_scrq(adapter, rx_scrq);
                rx_buff =
                    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
                                                          rx_comp.correlator);
                frames_processed++;
        }
  
 -      if (adapter->state != VNIC_CLOSING)
 +      if (adapter->state != VNIC_CLOSING &&
 +          ((atomic_read(&adapter->rx_pool[scrq_num].available) <
 +            adapter->req_rx_add_entries_per_subcrq / 2) ||
 +            frames_processed < budget))
                replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
 -
        if (frames_processed < budget) {
 -              enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
 -              napi_complete_done(napi, frames_processed);
 -              if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
 -                  napi_reschedule(napi)) {
 -                      disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
 -                      goto restart_poll;
 +              if (napi_complete_done(napi, frames_processed)) {
 +                      enable_scrq_irq(adapter, rx_scrq);
 +                      if (pending_scrq(adapter, rx_scrq)) {
 +                              rmb();
 +                              if (napi_reschedule(napi)) {
 +                                      disable_scrq_irq(adapter, rx_scrq);
 +                                      goto restart_poll;
 +                              }
 +                      }
                }
        }
        return frames_processed;
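
The reworked tail of ibmvnic_poll() follows the standard NAPI idiom for closing the interrupt race: re-arm the queue interrupt only once napi_complete_done() succeeds, then re-check for work that slipped in before the interrupt was armed and reschedule if any is found. Schematically, with hypothetical queue_irq_enable()/queue_irq_disable()/queue_has_pending() helpers standing in for the driver's calls:

	if (work_done < budget) {
		if (napi_complete_done(napi, work_done)) {
			queue_irq_enable(q);			/* hypothetical */
			if (queue_has_pending(q)) {		/* hypothetical */
				/* Order the pending check before any
				 * descriptor reads on the restarted poll.
				 */
				rmb();
				if (napi_reschedule(napi)) {
					queue_irq_disable(q);	/* hypothetical */
					goto restart_poll;
				}
			}
		}
	}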
@@@ -2952,7 -2869,6 +2963,7 @@@ static int reset_one_sub_crq_queue(stru
        memset(scrq->msgs, 0, 4 * PAGE_SIZE);
        atomic_set(&scrq->used, 0);
        scrq->cur = 0;
 +      scrq->ind_buf.index = 0;
  
        rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
                           4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);
@@@ -2963,6 -2879,9 +2974,9 @@@ static int reset_sub_crq_queues(struct 
  {
        int i, rc;
  
+       if (!adapter->tx_scrq || !adapter->rx_scrq)
+               return -EINVAL;
        for (i = 0; i < adapter->req_tx_queues; i++) {
                netdev_dbg(adapter->netdev, "Re-setting tx_scrq[%d]\n", i);
                rc = reset_one_sub_crq_queue(adapter, adapter->tx_scrq[i]);
@@@ -3004,11 -2923,6 +3018,11 @@@ static void release_sub_crq_queue(struc
                }
        }
  
 +      dma_free_coherent(dev,
 +                        IBMVNIC_IND_ARR_SZ,
 +                        scrq->ind_buf.indir_arr,
 +                        scrq->ind_buf.indir_dma);
 +
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
        free_pages((unsigned long)scrq->msgs, 2);
@@@ -3055,17 -2969,6 +3069,17 @@@ static struct ibmvnic_sub_crq_queue *in
  
        scrq->adapter = adapter;
        scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
 +      scrq->ind_buf.index = 0;
 +
 +      scrq->ind_buf.indir_arr =
 +              dma_alloc_coherent(dev,
 +                                 IBMVNIC_IND_ARR_SZ,
 +                                 &scrq->ind_buf.indir_dma,
 +                                 GFP_KERNEL);
 +
 +      if (!scrq->ind_buf.indir_arr)
 +              goto indir_failed;
 +
        spin_lock_init(&scrq->lock);
  
        netdev_dbg(adapter->netdev,
  
        return scrq;
  
 +indir_failed:
 +      do {
 +              rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
 +                                      adapter->vdev->unit_address,
 +                                      scrq->crq_num);
 +      } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
  reg_failed:
        dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
                         DMA_BIDIRECTIONAL);
@@@ -3194,17 -3091,14 +3208,17 @@@ static int ibmvnic_complete_tx(struct i
        struct device *dev = &adapter->vdev->dev;
        struct ibmvnic_tx_pool *tx_pool;
        struct ibmvnic_tx_buff *txbuff;
 +      struct netdev_queue *txq;
        union sub_crq *next;
        int index;
 -      int i, j;
 +      int i;
  
  restart_loop:
        while (pending_scrq(adapter, scrq)) {
                unsigned int pool = scrq->pool_index;
                int num_entries = 0;
 +              int total_bytes = 0;
 +              int num_packets = 0;
  
                next = ibmvnic_next_scrq(adapter, scrq);
                for (i = 0; i < next->tx_comp.num_comps; i++) {
                        }
  
                        txbuff = &tx_pool->tx_buff[index];
 -
 -                      for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
 -                              if (!txbuff->data_dma[j])
 -                                      continue;
 -
 -                              txbuff->data_dma[j] = 0;
 -                      }
 -
 -                      if (txbuff->last_frag) {
 -                              dev_kfree_skb_any(txbuff->skb);
 +                      num_packets++;
 +                      num_entries += txbuff->num_entries;
 +                      if (txbuff->skb) {
 +                              total_bytes += txbuff->skb->len;
 +                              dev_consume_skb_irq(txbuff->skb);
                                txbuff->skb = NULL;
 +                      } else {
 +                              netdev_warn(adapter->netdev,
 +                                          "TX completion received with NULL socket buffer\n");
                        }
 -
 -                      num_entries += txbuff->num_entries;
 -
                        tx_pool->free_map[tx_pool->producer_index] = index;
                        tx_pool->producer_index =
                                (tx_pool->producer_index + 1) %
                /* remove tx_comp scrq */
                next->tx_comp.first = 0;
  
 +              txq = netdev_get_tx_queue(adapter->netdev, scrq->pool_index);
 +              netdev_tx_completed_queue(txq, num_packets, total_bytes);
 +
                if (atomic_sub_return(num_entries, &scrq->used) <=
                    (adapter->req_tx_entries_per_subcrq / 2) &&
                    __netif_subqueue_stopped(adapter->netdev,
@@@ -3642,6 -3538,38 +3656,6 @@@ static void print_subcrq_error(struct d
        }
  }
  
 -static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
 -                     union sub_crq *sub_crq)
 -{
 -      unsigned int ua = adapter->vdev->unit_address;
 -      struct device *dev = &adapter->vdev->dev;
 -      u64 *u64_crq = (u64 *)sub_crq;
 -      int rc;
 -
 -      netdev_dbg(adapter->netdev,
 -                 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
 -                 (unsigned long int)cpu_to_be64(remote_handle),
 -                 (unsigned long int)cpu_to_be64(u64_crq[0]),
 -                 (unsigned long int)cpu_to_be64(u64_crq[1]),
 -                 (unsigned long int)cpu_to_be64(u64_crq[2]),
 -                 (unsigned long int)cpu_to_be64(u64_crq[3]));
 -
 -      /* Make sure the hypervisor sees the complete request */
 -      mb();
 -
 -      rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
 -                              cpu_to_be64(remote_handle),
 -                              cpu_to_be64(u64_crq[0]),
 -                              cpu_to_be64(u64_crq[1]),
 -                              cpu_to_be64(u64_crq[2]),
 -                              cpu_to_be64(u64_crq[3]));
 -
 -      if (rc)
 -              print_subcrq_error(dev, rc, __func__);
 -
 -      return rc;
 -}
 -
  static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
                                u64 remote_handle, u64 ioba, u64 num_entries)
  {
@@@ -5044,6 -4972,9 +5058,9 @@@ static int ibmvnic_reset_crq(struct ibm
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
  
        /* Clean out the queue */
+       if (!crq->msgs)
+               return -EINVAL;
        memset(crq->msgs, 0, PAGE_SIZE);
        crq->cur = 0;
        crq->active = false;
@@@ -5348,7 -5279,7 +5365,7 @@@ static int ibmvnic_remove(struct vio_de
        unsigned long flags;
  
        spin_lock_irqsave(&adapter->state_lock, flags);
-       if (adapter->state == VNIC_RESETTING) {
+       if (test_bit(0, &adapter->resetting)) {
                spin_unlock_irqrestore(&adapter->state_lock, flags);
                return -EBUSY;
        }
index 9911d926dd7f64389e19f28fb0f9308d40b732c1,47a3fd71c96f4a0352abf511ad099adc1bdf48cb..9d0c32ad763bf4889a1b36b8943dce31bb30a4b4
@@@ -31,8 -31,6 +31,8 @@@
  #define IBMVNIC_BUFFS_PER_POOL        100
  #define IBMVNIC_MAX_QUEUES    16
  #define IBMVNIC_MAX_QUEUE_SZ   4096
 +#define IBMVNIC_MAX_IND_DESCS  128
 +#define IBMVNIC_IND_ARR_SZ    (IBMVNIC_MAX_IND_DESCS * 32)
  
  #define IBMVNIC_TSO_BUF_SZ    65536
  #define IBMVNIC_TSO_BUFS      64
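
For scale: with IBMVNIC_MAX_IND_DESCS at 128 and, going by the multiplier above, 32 bytes per descriptor, IBMVNIC_IND_ARR_SZ works out to 128 * 32 = 4096 bytes — one 4 KiB DMA-coherent page per sub-CRQ for the indirect buffer.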
@@@ -226,6 -224,8 +226,6 @@@ struct ibmvnic_tx_comp_desc 
  #define IBMVNIC_TCP_CHKSUM            0x20
  #define IBMVNIC_UDP_CHKSUM            0x08
  
 -#define IBMVNIC_MAX_FRAGS_PER_CRQ 3
 -
  struct ibmvnic_tx_desc {
        u8 first;
        u8 type;
@@@ -861,12 -861,6 +861,12 @@@ union sub_crq 
        struct ibmvnic_rx_buff_add_desc rx_add;
  };
  
 +struct ibmvnic_ind_xmit_queue {
 +      union sub_crq *indir_arr;
 +      dma_addr_t indir_dma;
 +      int index;
 +};
 +
  struct ibmvnic_sub_crq_queue {
        union sub_crq *msgs;
        int size, cur;
        spinlock_t lock;
        struct sk_buff *rx_skb_top;
        struct ibmvnic_adapter *adapter;
 +      struct ibmvnic_ind_xmit_queue ind_buf;
        atomic_t used;
        char name[32];
        u64 handle;
 -};
 +} ____cacheline_aligned;
  
  struct ibmvnic_long_term_buff {
        unsigned char *buff;
  
  struct ibmvnic_tx_buff {
        struct sk_buff *skb;
 -      dma_addr_t data_dma[IBMVNIC_MAX_FRAGS_PER_CRQ];
 -      unsigned int data_len[IBMVNIC_MAX_FRAGS_PER_CRQ];
        int index;
        int pool_index;
 -      bool last_frag;
 -      union sub_crq indir_arr[6];
 -      u8 hdr_data[140];
 -      dma_addr_t indir_dma;
        int num_entries;
  };
  
@@@ -907,7 -906,7 +907,7 @@@ struct ibmvnic_tx_pool 
        struct ibmvnic_long_term_buff long_term_buff;
        int num_buffers;
        int buf_size;
 -};
 +} ____cacheline_aligned;
  
  struct ibmvnic_rx_buff {
        struct sk_buff *skb;
@@@ -928,7 -927,7 +928,7 @@@ struct ibmvnic_rx_pool 
        int next_alloc;
        int active;
        struct ibmvnic_long_term_buff long_term_buff;
 -};
 +} ____cacheline_aligned;
  
  struct ibmvnic_vpd {
        unsigned char *buff;
@@@ -943,8 -942,7 +943,7 @@@ enum vnic_state {VNIC_PROBING = 1
                 VNIC_CLOSING,
                 VNIC_CLOSED,
                 VNIC_REMOVING,
-                VNIC_REMOVED,
-                VNIC_RESETTING};
+                VNIC_REMOVED};
  
  enum ibmvnic_reset_reason {VNIC_RESET_FAILOVER = 1,
                           VNIC_RESET_MOBILITY,
@@@ -1015,8 -1013,8 +1014,8 @@@ struct ibmvnic_adapter 
        atomic_t running_cap_crqs;
        bool wait_capability;
  
 -      struct ibmvnic_sub_crq_queue **tx_scrq;
 -      struct ibmvnic_sub_crq_queue **rx_scrq;
 +      struct ibmvnic_sub_crq_queue **tx_scrq ____cacheline_aligned;
 +      struct ibmvnic_sub_crq_queue **rx_scrq ____cacheline_aligned;
  
        /* rx structs */
        struct napi_struct *napi;
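
The ____cacheline_aligned annotations added above keep each queue structure, and the adapter's TX/RX queue-pointer arrays, on their own cache lines so that CPUs busy in the TX path do not bounce lines shared with RX state. A toy illustration of the annotation, not taken from the patch:

	#include <linux/cache.h>
	#include <linux/types.h>

	/* Toy example: each counter gets its own cache line, so writers of
	 * one do not invalidate the line holding the other (false sharing).
	 */
	struct example_stats {
		atomic_t tx_used ____cacheline_aligned;
		atomic_t rx_used ____cacheline_aligned;
	};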
index 2532f8eed6a8a552a1381f7c54e4307cec0ebc77,1b5390ec3d78a4e50b0a7c31c33b36669369d136..729c4f0d5ac5299a10b9862d7143d724b6eb88c0
@@@ -63,7 -63,7 +63,7 @@@ static void i40e_vc_notify_vf_link_stat
        } else if (vf->link_forced) {
                pfe.event_data.link_event.link_status = vf->link_up;
                pfe.event_data.link_event.link_speed =
 -                      (vf->link_up ? VIRTCHNL_LINK_SPEED_40GB : 0);
 +                      (vf->link_up ? i40e_virtchnl_link_speed(ls->link_speed) : 0);
        } else {
                pfe.event_data.link_event.link_status =
                        ls->link_info & I40E_AQ_LINK_UP;
@@@ -1403,7 -1403,8 +1403,8 @@@ static void i40e_cleanup_reset_vf(struc
   * @vf: pointer to the VF structure
   * @flr: VFLR was issued or not
   *
-  * Returns true if the VF is reset, false otherwise.
+  * Returns true if the VF is in reset, was reset successfully, or resets
+  * are disabled; false otherwise.
   **/
  bool i40e_reset_vf(struct i40e_vf *vf, bool flr)
  {
        u32 reg;
        int i;
  
+       if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state))
+               return true;
        /* If the VFs have been disabled, this means something else is
         * resetting the VF, so we shouldn't continue.
         */
        if (test_and_set_bit(__I40E_VF_DISABLE, pf->state))
-               return false;
+               return true;
  
        i40e_trigger_vf_reset(vf, flr);
  
@@@ -1581,6 -1585,15 +1585,15 @@@ void i40e_free_vfs(struct i40e_pf *pf
  
        i40e_notify_client_of_vf_enable(pf, 0);
  
+       /* Disable IOV before freeing resources. This lets any VF drivers
+        * running in the host get themselves cleaned up before we yank
+        * the carpet out from underneath their feet.
+        */
+       if (!pci_vfs_assigned(pf->pdev))
+               pci_disable_sriov(pf->pdev);
+       else
+               dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
        /* Amortize wait time by stopping all VFs at the same time */
        for (i = 0; i < pf->num_alloc_vfs; i++) {
                if (test_bit(I40E_VF_STATE_INIT, &pf->vf[i].vf_states))
                i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[i].lan_vsi_idx]);
        }
  
-       /* Disable IOV before freeing resources. This lets any VF drivers
-        * running in the host get themselves cleaned up before we yank
-        * the carpet out from underneath their feet.
-        */
-       if (!pci_vfs_assigned(pf->pdev))
-               pci_disable_sriov(pf->pdev);
-       else
-               dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
        /* free up VF resources */
        tmp = pf->num_alloc_vfs;
        pf->num_alloc_vfs = 0;
@@@ -4437,7 -4441,6 +4441,7 @@@ int i40e_ndo_set_vf_link_state(struct n
  {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_pf *pf = np->vsi->back;
 +      struct i40e_link_status *ls = &pf->hw.phy.link_info;
        struct virtchnl_pf_event pfe;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf;
                vf->link_forced = true;
                vf->link_up = true;
                pfe.event_data.link_event.link_status = true;
 -              pfe.event_data.link_event.link_speed = VIRTCHNL_LINK_SPEED_40GB;
 +              pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed);
                break;
        case IFLA_VF_LINK_STATE_DISABLE:
                vf->link_forced = true;
index 3ed4f4cda7f9b58df9bc04db4f3f73fcdd0c3287,ced6d76a0d8532e21d59646cfc98b252f023d7d5..29f765a246a05f91ae4f53081b583af73a63b6cf
@@@ -379,27 -379,6 +379,27 @@@ static void dwmac4_set_eee_pls(struct m
        writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
  }
  
 +static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
 +{
 +      void __iomem *ioaddr = hw->pcsr;
 +      int value = et & STMMAC_ET_MAX;
 +      int regval;
 +
 +      /* Program LPI entry timer value into register */
 +      writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
 +
 +      /* Enable/disable LPI entry timer */
 +      regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
 +      regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
 +
 +      if (et)
 +              regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
 +      else
 +              regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
 +
 +      writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
 +}
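
A zero timer value therefore leaves LPIATE clear (no automatic LPI entry), while any non-zero value is masked to STMMAC_ET_MAX and armed. Purely for illustration — stmmac reaches this through the mac ops table, not direct calls:

	/* Hypothetical direct calls, for illustration only: */
	dwmac4_set_eee_lpi_entry_timer(hw, 0);     /* timer off, LPIATE cleared */
	dwmac4_set_eee_lpi_entry_timer(hw, 50000); /* masked value written, LPIATE set */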
 +
  static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
  {
        void __iomem *ioaddr = hw->pcsr;
@@@ -1185,7 -1164,6 +1185,7 @@@ const struct stmmac_ops dwmac4_ops = 
        .get_umac_addr = dwmac4_get_umac_addr,
        .set_eee_mode = dwmac4_set_eee_mode,
        .reset_eee_mode = dwmac4_reset_eee_mode,
 +      .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
        .set_eee_timer = dwmac4_set_eee_timer,
        .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
        .pcs_get_adv_lp = dwmac4_get_adv_lp,
        .debug = dwmac4_debug,
        .set_filter = dwmac4_set_filter,
-       .flex_pps_config = dwmac5_flex_pps_config,
        .set_mac_loopback = dwmac4_set_mac_loopback,
        .update_vlan_hash = dwmac4_update_vlan_hash,
        .sarc_configure = dwmac4_sarc_configure,
@@@ -1228,7 -1205,6 +1227,7 @@@ const struct stmmac_ops dwmac410_ops = 
        .get_umac_addr = dwmac4_get_umac_addr,
        .set_eee_mode = dwmac4_set_eee_mode,
        .reset_eee_mode = dwmac4_reset_eee_mode,
 +      .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
        .set_eee_timer = dwmac4_set_eee_timer,
        .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
        .pcs_get_adv_lp = dwmac4_get_adv_lp,
        .debug = dwmac4_debug,
        .set_filter = dwmac4_set_filter,
+       .flex_pps_config = dwmac5_flex_pps_config,
        .set_mac_loopback = dwmac4_set_mac_loopback,
        .update_vlan_hash = dwmac4_update_vlan_hash,
        .sarc_configure = dwmac4_sarc_configure,
@@@ -1272,7 -1249,6 +1272,7 @@@ const struct stmmac_ops dwmac510_ops = 
        .get_umac_addr = dwmac4_get_umac_addr,
        .set_eee_mode = dwmac4_set_eee_mode,
        .reset_eee_mode = dwmac4_reset_eee_mode,
 +      .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
        .set_eee_timer = dwmac4_set_eee_timer,
        .set_eee_pls = dwmac4_set_eee_pls,
        .pcs_ctrl_ane = dwmac4_ctrl_ane,
diff --combined drivers/net/tun.c
index 7c62d82c57db6a703e4db52ad6f456758484dd58,cd06cae760356fc200f1c46a6f9c0c42f52e6dca..2dc1988a89739ae660f3416502420bd7dea13e7d
@@@ -107,6 -107,17 +107,6 @@@ struct tap_filter 
  
  #define TUN_FLOW_EXPIRE (3 * HZ)
  
 -struct tun_pcpu_stats {
 -      u64_stats_t rx_packets;
 -      u64_stats_t rx_bytes;
 -      u64_stats_t tx_packets;
 -      u64_stats_t tx_bytes;
 -      struct u64_stats_sync syncp;
 -      u32 rx_dropped;
 -      u32 tx_dropped;
 -      u32 rx_frame_errors;
 -};
 -
  /* A tun_file connects an open character device to a tuntap netdevice. It
   * also contains all socket related structures (except sock_fprog and tap_filter)
   * to serve as one transmit queue for tuntap device. The sock_fprog and
@@@ -196,7 -207,7 +196,7 @@@ struct tun_struct 
        void *security;
        u32 flow_count;
        u32 rx_batched;
 -      struct tun_pcpu_stats __percpu *pcpu_stats;
 +      atomic_long_t rx_frame_errors;
        struct bpf_prog __rcu *xdp_prog;
        struct tun_prog __rcu *steering_prog;
        struct tun_prog __rcu *filter_prog;
@@@ -1055,7 -1066,7 +1055,7 @@@ static netdev_tx_t tun_net_xmit(struct 
        return NETDEV_TX_OK;
  
  drop:
 -      this_cpu_inc(tun->pcpu_stats->tx_dropped);
 +      atomic_long_inc(&dev->tx_dropped);
        skb_tx_error(skb);
        kfree_skb(skb);
        rcu_read_unlock();
@@@ -1092,12 -1103,37 +1092,12 @@@ static void tun_set_headroom(struct net
  static void
  tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  {
 -      u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
        struct tun_struct *tun = netdev_priv(dev);
 -      struct tun_pcpu_stats *p;
 -      int i;
 -
 -      for_each_possible_cpu(i) {
 -              u64 rxpackets, rxbytes, txpackets, txbytes;
 -              unsigned int start;
 -
 -              p = per_cpu_ptr(tun->pcpu_stats, i);
 -              do {
 -                      start = u64_stats_fetch_begin(&p->syncp);
 -                      rxpackets       = u64_stats_read(&p->rx_packets);
 -                      rxbytes         = u64_stats_read(&p->rx_bytes);
 -                      txpackets       = u64_stats_read(&p->tx_packets);
 -                      txbytes         = u64_stats_read(&p->tx_bytes);
 -              } while (u64_stats_fetch_retry(&p->syncp, start));
  
 -              stats->rx_packets       += rxpackets;
 -              stats->rx_bytes         += rxbytes;
 -              stats->tx_packets       += txpackets;
 -              stats->tx_bytes         += txbytes;
 +      dev_get_tstats64(dev, stats);
  
 -              /* u32 counters */
 -              rx_dropped      += p->rx_dropped;
 -              rx_frame_errors += p->rx_frame_errors;
 -              tx_dropped      += p->tx_dropped;
 -      }
 -      stats->rx_dropped  = rx_dropped;
 -      stats->rx_frame_errors = rx_frame_errors;
 -      stats->tx_dropped = tx_dropped;
 +      stats->rx_frame_errors +=
 +              (unsigned long)atomic_long_read(&tun->rx_frame_errors);
  }
  
  static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
@@@ -1211,7 -1247,7 +1211,7 @@@ resample
                void *frame = tun_xdp_to_ptr(xdp);
  
                if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
 -                      this_cpu_inc(tun->pcpu_stats->tx_dropped);
 +                      atomic_long_inc(&dev->tx_dropped);
                        xdp_return_frame_rx_napi(xdp);
                        drops++;
                }
@@@ -1247,7 -1283,7 +1247,7 @@@ static const struct net_device_ops tap_
        .ndo_select_queue       = tun_select_queue,
        .ndo_features_check     = passthru_features_check,
        .ndo_set_rx_headroom    = tun_set_headroom,
 -      .ndo_get_stats64        = tun_net_get_stats64,
 +      .ndo_get_stats64        = dev_get_tstats64,
        .ndo_bpf                = tun_xdp,
        .ndo_xdp_xmit           = tun_xdp_xmit,
        .ndo_change_carrier     = tun_net_change_carrier,
@@@ -1541,7 -1577,7 +1541,7 @@@ static int tun_xdp_act(struct tun_struc
                trace_xdp_exception(tun->dev, xdp_prog, act);
                fallthrough;
        case XDP_DROP:
 -              this_cpu_inc(tun->pcpu_stats->rx_dropped);
 +              atomic_long_inc(&tun->dev->rx_dropped);
                break;
        }
  
@@@ -1647,6 -1683,7 +1647,6 @@@ static ssize_t tun_get_user(struct tun_
        size_t total_len = iov_iter_count(from);
        size_t len = total_len, align = tun->align, linear;
        struct virtio_net_hdr gso = { 0 };
 -      struct tun_pcpu_stats *stats;
        int good_linear;
        int copylen;
        bool zerocopy = false;
                 */
                skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
                if (IS_ERR(skb)) {
 -                      this_cpu_inc(tun->pcpu_stats->rx_dropped);
 +                      atomic_long_inc(&tun->dev->rx_dropped);
                        return PTR_ERR(skb);
                }
                if (!skb)
  
                if (IS_ERR(skb)) {
                        if (PTR_ERR(skb) != -EAGAIN)
 -                              this_cpu_inc(tun->pcpu_stats->rx_dropped);
 +                              atomic_long_inc(&tun->dev->rx_dropped);
                        if (frags)
                                mutex_unlock(&tfile->napi_mutex);
                        return PTR_ERR(skb);
                if (err) {
                        err = -EFAULT;
  drop:
 -                      this_cpu_inc(tun->pcpu_stats->rx_dropped);
 +                      atomic_long_inc(&tun->dev->rx_dropped);
                        kfree_skb(skb);
                        if (frags) {
                                tfile->napi.skb = NULL;
        }
  
        if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
 -              this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
 +              atomic_long_inc(&tun->rx_frame_errors);
                kfree_skb(skb);
                if (frags) {
                        tfile->napi.skb = NULL;
                                pi.proto = htons(ETH_P_IPV6);
                                break;
                        default:
 -                              this_cpu_inc(tun->pcpu_stats->rx_dropped);
 +                              atomic_long_inc(&tun->dev->rx_dropped);
                                kfree_skb(skb);
                                return -EINVAL;
                        }
                                          skb_headlen(skb));
  
                if (unlikely(headlen > skb_headlen(skb))) {
 -                      this_cpu_inc(tun->pcpu_stats->rx_dropped);
 +                      atomic_long_inc(&tun->dev->rx_dropped);
                        napi_free_frags(&tfile->napi);
                        rcu_read_unlock();
                        mutex_unlock(&tfile->napi_mutex);
        }
        rcu_read_unlock();
  
 -      stats = get_cpu_ptr(tun->pcpu_stats);
 -      u64_stats_update_begin(&stats->syncp);
 -      u64_stats_inc(&stats->rx_packets);
 -      u64_stats_add(&stats->rx_bytes, len);
 -      u64_stats_update_end(&stats->syncp);
 -      put_cpu_ptr(stats);
 +      preempt_disable();
 +      dev_sw_netstats_rx_add(tun->dev, len);
 +      preempt_enable();
  
        if (rxhash)
                tun_flow_update(tun, rxhash, tfile);
@@@ -1921,12 -1961,15 +1921,15 @@@ static ssize_t tun_chr_write_iter(struc
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun = tun_get(tfile);
        ssize_t result;
+       int noblock = 0;
  
        if (!tun)
                return -EBADFD;
  
-       result = tun_get_user(tun, tfile, NULL, from,
-                             file->f_flags & O_NONBLOCK, false);
+       if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+               noblock = 1;
+       result = tun_get_user(tun, tfile, NULL, from, noblock, false);
  
        tun_put(tun);
        return result;
@@@ -1939,6 -1982,7 +1942,6 @@@ static ssize_t tun_put_user_xdp(struct 
  {
        int vnet_hdr_sz = 0;
        size_t size = xdp_frame->len;
 -      struct tun_pcpu_stats *stats;
        size_t ret;
  
        if (tun->flags & IFF_VNET_HDR) {
  
        ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
  
 -      stats = get_cpu_ptr(tun->pcpu_stats);
 -      u64_stats_update_begin(&stats->syncp);
 -      u64_stats_inc(&stats->tx_packets);
 -      u64_stats_add(&stats->tx_bytes, ret);
 -      u64_stats_update_end(&stats->syncp);
 -      put_cpu_ptr(tun->pcpu_stats);
 +      preempt_disable();
 +      dev_sw_netstats_tx_add(tun->dev, 1, ret);
 +      preempt_enable();
  
        return ret;
  }
@@@ -1969,6 -2016,7 +1972,6 @@@ static ssize_t tun_put_user(struct tun_
                            struct iov_iter *iter)
  {
        struct tun_pi pi = { 0, skb->protocol };
 -      struct tun_pcpu_stats *stats;
        ssize_t total;
        int vlan_offset = 0;
        int vlan_hlen = 0;
  
  done:
        /* caller is in process context, */
 -      stats = get_cpu_ptr(tun->pcpu_stats);
 -      u64_stats_update_begin(&stats->syncp);
 -      u64_stats_inc(&stats->tx_packets);
 -      u64_stats_add(&stats->tx_bytes, skb->len + vlan_hlen);
 -      u64_stats_update_end(&stats->syncp);
 -      put_cpu_ptr(tun->pcpu_stats);
 +      preempt_disable();
 +      dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
 +      preempt_enable();
  
        return total;
  }
@@@ -2137,10 -2188,15 +2140,15 @@@ static ssize_t tun_chr_read_iter(struc
        struct tun_file *tfile = file->private_data;
        struct tun_struct *tun = tun_get(tfile);
        ssize_t len = iov_iter_count(to), ret;
+       int noblock = 0;
  
        if (!tun)
                return -EBADFD;
-       ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
+       if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
+               noblock = 1;
+       ret = tun_do_read(tun, tfile, to, noblock, NULL);
        ret = min_t(ssize_t, ret, len);
        if (ret > 0)
                iocb->ki_pos = ret;
@@@ -2187,11 -2243,11 +2195,11 @@@ static void tun_free_netdev(struct net_
  
        BUG_ON(!(list_empty(&tun->disabled)));
  
 -      free_percpu(tun->pcpu_stats);
 -      /* We clear pcpu_stats so that tun_set_iff() can tell if
 +      free_percpu(dev->tstats);
 +      /* We clear tstats so that tun_set_iff() can tell if
         * tun_free_netdev() has been called from register_netdevice().
         */
 -      tun->pcpu_stats = NULL;
 +      dev->tstats = NULL;
  
        tun_flow_uninit(tun);
        security_tun_dev_free_security(tun->security);
@@@ -2322,6 -2378,7 +2330,6 @@@ static int tun_xdp_one(struct tun_struc
        unsigned int datasize = xdp->data_end - xdp->data;
        struct tun_xdp_hdr *hdr = xdp->data_hard_start;
        struct virtio_net_hdr *gso = &hdr->gso;
 -      struct tun_pcpu_stats *stats;
        struct bpf_prog *xdp_prog;
        struct sk_buff *skb = NULL;
        u32 rxhash = 0, act;
@@@ -2379,7 -2436,7 +2387,7 @@@ build
        skb_put(skb, xdp->data_end - xdp->data);
  
        if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
 -              this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
 +              atomic_long_inc(&tun->rx_frame_errors);
                kfree_skb(skb);
                err = -EINVAL;
                goto out;
  
        netif_receive_skb(skb);
  
 -      /* No need for get_cpu_ptr() here since this function is
 +      /* No need to disable preemption here since this function is
         * always called with bh disabled
         */
 -      stats = this_cpu_ptr(tun->pcpu_stats);
 -      u64_stats_update_begin(&stats->syncp);
 -      u64_stats_inc(&stats->rx_packets);
 -      u64_stats_add(&stats->rx_bytes, datasize);
 -      u64_stats_update_end(&stats->syncp);
 +      dev_sw_netstats_rx_add(tun->dev, datasize);
  
        if (rxhash)
                tun_flow_update(tun, rxhash, tfile);
@@@ -2698,8 -2759,8 +2706,8 @@@ static int tun_set_iff(struct net *net
                tun->rx_batched = 0;
                RCU_INIT_POINTER(tun->steering_prog, NULL);
  
 -              tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
 -              if (!tun->pcpu_stats) {
 +              dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 +              if (!dev->tstats) {
                        err = -ENOMEM;
                        goto err_free_dev;
                }
@@@ -2754,16 -2815,16 +2762,16 @@@ err_detach
        tun_detach_all(dev);
        /* We are here because register_netdevice() has failed.
         * If register_netdevice() already called tun_free_netdev()
 -       * while dealing with the error, tun->pcpu_stats has been cleared.
 +       * while dealing with the error, dev->tstats has been cleared.
         */
 -      if (!tun->pcpu_stats)
 +      if (!dev->tstats)
                goto err_free_dev;
  
  err_free_flow:
        tun_flow_uninit(tun);
        security_tun_dev_free_security(tun->security);
  err_free_stat:
 -      free_percpu(tun->pcpu_stats);
 +      free_percpu(dev->tstats);
  err_free_dev:
        free_netdev(dev);
        return err;
@@@ -3071,19 -3132,10 +3079,19 @@@ static long __tun_chr_ioctl(struct fil
                                   "Linktype set failed because interface is up\n");
                        ret = -EBUSY;
                } else {
 +                      ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
 +                                                     tun->dev);
 +                      ret = notifier_to_errno(ret);
 +                      if (ret) {
 +                              netif_info(tun, drv, tun->dev,
 +                                         "Refused to change device type\n");
 +                              break;
 +                      }
                        tun->dev->type = (int) arg;
                        netif_info(tun, drv, tun->dev, "linktype set to %d\n",
                                   tun->dev->type);
 -                      ret = 0;
 +                      call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
 +                                               tun->dev);
                }
                break;
  
index 0ffa389066a0aca8cdb936d15bc01facebef6312,d0a3bd9ff3c3db6a5b707d7b542852cf653f5e1b..ae26594c7302c962dd1df8270f1ce534cee5181f
  
  #define S3FWRN5_I2C_DRIVER_NAME "s3fwrn5_i2c"
  
 -#define S3FWRN5_I2C_MAX_PAYLOAD 32
  #define S3FWRN5_EN_WAIT_TIME 150
  
  struct s3fwrn5_i2c_phy {
        struct i2c_client *i2c_dev;
        struct nci_dev *ndev;
  
-       unsigned int gpio_en;
-       unsigned int gpio_fw_wake;
+       int gpio_en;
+       int gpio_fw_wake;
  
        struct mutex mutex;
  
@@@ -247,7 -248,8 +247,7 @@@ static int s3fwrn5_i2c_probe(struct i2c
        if (ret < 0)
                return ret;
  
 -      ret = s3fwrn5_probe(&phy->ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops,
 -              S3FWRN5_I2C_MAX_PAYLOAD);
 +      ret = s3fwrn5_probe(&phy->ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops);
        if (ret < 0)
                return ret;
  
index 32ea41b4356b0b384b61926fca7773ee51c694d6,b235393e091caf675ffd193ee52f5d900ec7546c..0e9af2fbaa761736c3f8a15d2a17c53512239999
@@@ -417,10 -417,13 +417,13 @@@ enum qeth_qdio_out_buffer_state 
        QETH_QDIO_BUF_EMPTY,
        /* Filled by driver; owned by hardware in order to be sent. */
        QETH_QDIO_BUF_PRIMED,
-       /* Identified to be pending in TPQ. */
+       /* Discovered by the TX completion code: */
        QETH_QDIO_BUF_PENDING,
-       /* Found in completion queue. */
-       QETH_QDIO_BUF_IN_CQ,
+       /* Finished by the TX completion code: */
+       QETH_QDIO_BUF_NEED_QAOB,
+       /* Received QAOB notification on CQ: */
+       QETH_QDIO_BUF_QAOB_OK,
+       QETH_QDIO_BUF_QAOB_ERROR,
        /* Handled via transfer pending / completion queue. */
        QETH_QDIO_BUF_HANDLED_DELAYED,
  };
@@@ -701,19 -704,6 +704,19 @@@ enum qeth_pnso_mode 
        QETH_PNSO_ADDR_INFO,
  };
  
 +enum qeth_link_mode {
 +      QETH_LINK_MODE_UNKNOWN,
 +      QETH_LINK_MODE_FIBRE_SHORT,
 +      QETH_LINK_MODE_FIBRE_LONG,
 +};
 +
 +struct qeth_link_info {
 +      u32 speed;
 +      u8 duplex;
 +      u8 port;
 +      enum qeth_link_mode link_mode;
 +};
 +
  #define QETH_BROADCAST_WITH_ECHO    0x01
  #define QETH_BROADCAST_WITHOUT_ECHO 0x02
  struct qeth_card_info {
        struct qeth_card_blkt blkt;
        __u32 diagass_support;
        __u32 hwtrap;
 +      struct qeth_link_info link_info;
  };
  
  enum qeth_discipline_id {
@@@ -810,6 -799,12 +813,6 @@@ struct qeth_rx 
        u8 bufs_refill;
  };
  
 -struct carrier_info {
 -      __u8  card_type;
 -      __u16 port_mode;
 -      __u32 port_speed;
 -};
 -
  struct qeth_switch_info {
        __u32 capabilities;
        __u32 settings;
@@@ -1116,7 -1111,7 +1119,7 @@@ void qeth_prepare_ipa_cmd(struct qeth_c
  int qeth_query_switch_attributes(struct qeth_card *card,
                                  struct qeth_switch_info *sw_info);
  int qeth_query_card_info(struct qeth_card *card,
 -                       struct carrier_info *carrier_info);
 +                       struct qeth_link_info *link_info);
  int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
                                     enum qeth_ipa_isolation_modes mode);
  
index d9ba0586373b594c8e0882cd7befb7d1d54d9634,e27319de7b00be04610d6d4f2f427844629e0ecc..319190824cd2a1947c2b11cc58a3ae6c3d5236ca
@@@ -33,6 -33,7 +33,7 @@@
  
  #include <net/iucv/af_iucv.h>
  #include <net/dsfield.h>
+ #include <net/sock.h>
  
  #include <asm/ebcdic.h>
  #include <asm/chpid.h>
@@@ -499,17 -500,12 +500,12 @@@ static void qeth_cleanup_handled_pendin
  
                }
        }
-       if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
-                                       QETH_QDIO_BUF_HANDLED_DELAYED)) {
-               /* for recovery situations */
-               qeth_init_qdio_out_buf(q, bidx);
-               QETH_CARD_TEXT(q->card, 2, "clprecov");
-       }
  }
  
  static void qeth_qdio_handle_aob(struct qeth_card *card,
                                 unsigned long phys_aob_addr)
  {
+       enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
        struct qaob *aob;
        struct qeth_qdio_out_buffer *buffer;
        enum iucv_tx_notify notification;
        buffer = (struct qeth_qdio_out_buffer *) aob->user1;
        QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);
  
-       if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
-                          QETH_QDIO_BUF_IN_CQ) == QETH_QDIO_BUF_PRIMED) {
-               notification = TX_NOTIFY_OK;
-       } else {
-               WARN_ON_ONCE(atomic_read(&buffer->state) !=
-                                                       QETH_QDIO_BUF_PENDING);
-               atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
-               notification = TX_NOTIFY_DELAYED_OK;
-       }
-       if (aob->aorc != 0)  {
-               QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
-               notification = qeth_compute_cq_notification(aob->aorc, 1);
-       }
-       qeth_notify_skbs(buffer->q, buffer, notification);
        /* Free dangling allocations. The attached skbs are handled by
         * qeth_cleanup_handled_pending().
         */
                if (data && buffer->is_header[i])
                        kmem_cache_free(qeth_core_header_cache, data);
        }
-       atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
+       if (aob->aorc) {
+               QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
+               new_state = QETH_QDIO_BUF_QAOB_ERROR;
+       }
+       switch (atomic_xchg(&buffer->state, new_state)) {
+       case QETH_QDIO_BUF_PRIMED:
+               /* Faster than TX completion code. */
+               notification = qeth_compute_cq_notification(aob->aorc, 0);
+               qeth_notify_skbs(buffer->q, buffer, notification);
+               atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
+               break;
+       case QETH_QDIO_BUF_PENDING:
+               /* TX completion code is active and will handle the async
+                * completion for us.
+                */
+               break;
+       case QETH_QDIO_BUF_NEED_QAOB:
+               /* TX completion code is already finished. */
+               notification = qeth_compute_cq_notification(aob->aorc, 1);
+               qeth_notify_skbs(buffer->q, buffer, notification);
+               atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
  
        qdio_release_aob(aob);
  }
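
Both parties to the race — the CQ handler above and the TX completion code further down — now resolve it with a single atomic_xchg() on the buffer state: whichever side runs second observes the state the first side stored and performs the deferred notification. Reduced to its essentials, with made-up enum and function names that are not the driver's:

	/* Illustrative sketch of the handshake; not the driver's types. */
	enum buf_state { BUF_PRIMED, BUF_PENDING, BUF_NEED_QAOB,
			 BUF_QAOB_OK, BUF_QAOB_ERROR, BUF_DONE };

	static void notify_stack(bool ok)
	{
		/* stand-in for qeth_notify_skbs() */
	}

	/* CQ side, on arrival of the async (QAOB) completion: */
	static void qaob_arrived(atomic_t *state, bool ok)
	{
		switch (atomic_xchg(state, ok ? BUF_QAOB_OK : BUF_QAOB_ERROR)) {
		case BUF_PRIMED:	/* QAOB won the race; notify directly. */
		case BUF_NEED_QAOB:	/* TX completion finished; notify for it. */
			notify_stack(ok);
			atomic_set(state, BUF_DONE);
			break;
		case BUF_PENDING:	/* TX completion is mid-flight and will
					 * act on the state stored above.
					 */
			break;
		default:
			break;
		}
	}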
@@@ -1405,7 -1411,7 +1411,7 @@@ static void qeth_notify_skbs(struct qet
        skb_queue_walk(&buf->skb_list, skb) {
                QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
                QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
-               if (skb->protocol == htons(ETH_P_AF_IUCV) && skb->sk)
+               if (skb->sk && skb->sk->sk_family == PF_IUCV)
                        iucv_sk(skb->sk)->sk_txnotify(skb, notification);
        }
  }
@@@ -1416,9 -1422,6 +1422,6 @@@ static void qeth_tx_complete_buf(struc
        struct qeth_qdio_out_q *queue = buf->q;
        struct sk_buff *skb;
  
-       /* release may never happen from within CQ tasklet scope */
-       WARN_ON_ONCE(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
        if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
                qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);
  
@@@ -4868,8 -4871,8 +4871,8 @@@ out_free
  static int qeth_query_card_info_cb(struct qeth_card *card,
                                   struct qeth_reply *reply, unsigned long data)
  {
 -      struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 +      struct qeth_link_info *link_info = reply->param;
        struct qeth_query_card_info *card_info;
  
        QETH_CARD_TEXT(card, 2, "qcrdincb");
                return -EIO;
  
        card_info = &cmd->data.setadapterparms.data.card_info;
 -      carrier_info->card_type = card_info->card_type;
 -      carrier_info->port_mode = card_info->port_mode;
 -      carrier_info->port_speed = card_info->port_speed;
 +      netdev_dbg(card->dev,
 +                 "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n",
 +                 card_info->card_type, card_info->port_mode,
 +                 card_info->port_speed);
 +
 +      switch (card_info->port_mode) {
 +      case CARD_INFO_PORTM_FULLDUPLEX:
 +              link_info->duplex = DUPLEX_FULL;
 +              break;
 +      case CARD_INFO_PORTM_HALFDUPLEX:
 +              link_info->duplex = DUPLEX_HALF;
 +              break;
 +      default:
 +              link_info->duplex = DUPLEX_UNKNOWN;
 +      }
 +
 +      switch (card_info->card_type) {
 +      case CARD_INFO_TYPE_1G_COPPER_A:
 +      case CARD_INFO_TYPE_1G_COPPER_B:
 +              link_info->speed = SPEED_1000;
 +              link_info->port = PORT_TP;
 +              break;
 +      case CARD_INFO_TYPE_1G_FIBRE_A:
 +      case CARD_INFO_TYPE_1G_FIBRE_B:
 +              link_info->speed = SPEED_1000;
 +              link_info->port = PORT_FIBRE;
 +              break;
 +      case CARD_INFO_TYPE_10G_FIBRE_A:
 +      case CARD_INFO_TYPE_10G_FIBRE_B:
 +              link_info->speed = SPEED_10000;
 +              link_info->port = PORT_FIBRE;
 +              break;
 +      default:
 +              switch (card_info->port_speed) {
 +              case CARD_INFO_PORTS_10M:
 +                      link_info->speed = SPEED_10;
 +                      break;
 +              case CARD_INFO_PORTS_100M:
 +                      link_info->speed = SPEED_100;
 +                      break;
 +              case CARD_INFO_PORTS_1G:
 +                      link_info->speed = SPEED_1000;
 +                      break;
 +              case CARD_INFO_PORTS_10G:
 +                      link_info->speed = SPEED_10000;
 +                      break;
 +              case CARD_INFO_PORTS_25G:
 +                      link_info->speed = SPEED_25000;
 +                      break;
 +              default:
 +                      link_info->speed = SPEED_UNKNOWN;
 +              }
 +
 +              link_info->port = PORT_OTHER;
 +      }
 +
        return 0;
  }
  
  int qeth_query_card_info(struct qeth_card *card,
 -                       struct carrier_info *carrier_info)
 +                       struct qeth_link_info *link_info)
  {
        struct qeth_cmd_buffer *iob;
  
        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
        if (!iob)
                return -ENOMEM;
 -      return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
 -                                      (void *)carrier_info);
 +
 +      return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb, link_info);
 +}
 +
 +static int qeth_init_link_info_oat_cb(struct qeth_card *card,
 +                                    struct qeth_reply *reply_priv,
 +                                    unsigned long data)
 +{
 +      struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
 +      struct qeth_link_info *link_info = reply_priv->param;
 +      struct qeth_query_oat_physical_if *phys_if;
 +      struct qeth_query_oat_reply *reply;
 +
 +      if (qeth_setadpparms_inspect_rc(cmd))
 +              return -EIO;
 +
 +      /* Multi-part reply is unexpected, don't bother: */
 +      if (cmd->data.setadapterparms.hdr.used_total > 1)
 +              return -EINVAL;
 +
 +      /* Expect the reply to start with phys_if data: */
 +      reply = &cmd->data.setadapterparms.data.query_oat.reply[0];
 +      if (reply->type != QETH_QOAT_REPLY_TYPE_PHYS_IF ||
 +          reply->length < sizeof(*reply))
 +              return -EINVAL;
 +
 +      phys_if = &reply->phys_if;
 +
 +      switch (phys_if->speed_duplex) {
 +      case QETH_QOAT_PHYS_SPEED_10M_HALF:
 +              link_info->speed = SPEED_10;
 +              link_info->duplex = DUPLEX_HALF;
 +              break;
 +      case QETH_QOAT_PHYS_SPEED_10M_FULL:
 +              link_info->speed = SPEED_10;
 +              link_info->duplex = DUPLEX_FULL;
 +              break;
 +      case QETH_QOAT_PHYS_SPEED_100M_HALF:
 +              link_info->speed = SPEED_100;
 +              link_info->duplex = DUPLEX_HALF;
 +              break;
 +      case QETH_QOAT_PHYS_SPEED_100M_FULL:
 +              link_info->speed = SPEED_100;
 +              link_info->duplex = DUPLEX_FULL;
 +              break;
 +      case QETH_QOAT_PHYS_SPEED_1000M_HALF:
 +              link_info->speed = SPEED_1000;
 +              link_info->duplex = DUPLEX_HALF;
 +              break;
 +      case QETH_QOAT_PHYS_SPEED_1000M_FULL:
 +              link_info->speed = SPEED_1000;
 +              link_info->duplex = DUPLEX_FULL;
 +              break;
 +      case QETH_QOAT_PHYS_SPEED_10G_FULL:
 +              link_info->speed = SPEED_10000;
 +              link_info->duplex = DUPLEX_FULL;
 +              break;
 +      case QETH_QOAT_PHYS_SPEED_25G_FULL:
 +              link_info->speed = SPEED_25000;
 +              link_info->duplex = DUPLEX_FULL;
 +              break;
 +      case QETH_QOAT_PHYS_SPEED_UNKNOWN:
 +      default:
 +              link_info->speed = SPEED_UNKNOWN;
 +              link_info->duplex = DUPLEX_UNKNOWN;
 +              break;
 +      }
 +
 +      switch (phys_if->media_type) {
 +      case QETH_QOAT_PHYS_MEDIA_COPPER:
 +              link_info->port = PORT_TP;
 +              link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
 +              break;
 +      case QETH_QOAT_PHYS_MEDIA_FIBRE_SHORT:
 +              link_info->port = PORT_FIBRE;
 +              link_info->link_mode = QETH_LINK_MODE_FIBRE_SHORT;
 +              break;
 +      case QETH_QOAT_PHYS_MEDIA_FIBRE_LONG:
 +              link_info->port = PORT_FIBRE;
 +              link_info->link_mode = QETH_LINK_MODE_FIBRE_LONG;
 +              break;
 +      default:
 +              link_info->port = PORT_OTHER;
 +              link_info->link_mode = QETH_LINK_MODE_UNKNOWN;
 +              break;
 +      }
 +
 +      return 0;
 +}
 +
 +static void qeth_init_link_info(struct qeth_card *card)
 +{
 +      card->info.link_info.duplex = DUPLEX_FULL;
 +
 +      if (IS_IQD(card) || IS_VM_NIC(card)) {
 +              card->info.link_info.speed = SPEED_10000;
 +              card->info.link_info.port = PORT_FIBRE;
 +              card->info.link_info.link_mode = QETH_LINK_MODE_FIBRE_SHORT;
 +      } else {
 +              switch (card->info.link_type) {
 +              case QETH_LINK_TYPE_FAST_ETH:
 +              case QETH_LINK_TYPE_LANE_ETH100:
 +                      card->info.link_info.speed = SPEED_100;
 +                      card->info.link_info.port = PORT_TP;
 +                      break;
 +              case QETH_LINK_TYPE_GBIT_ETH:
 +              case QETH_LINK_TYPE_LANE_ETH1000:
 +                      card->info.link_info.speed = SPEED_1000;
 +                      card->info.link_info.port = PORT_FIBRE;
 +                      break;
 +              case QETH_LINK_TYPE_10GBIT_ETH:
 +                      card->info.link_info.speed = SPEED_10000;
 +                      card->info.link_info.port = PORT_FIBRE;
 +                      break;
 +              case QETH_LINK_TYPE_25GBIT_ETH:
 +                      card->info.link_info.speed = SPEED_25000;
 +                      card->info.link_info.port = PORT_FIBRE;
 +                      break;
 +              default:
 +                      dev_info(&card->gdev->dev, "Unknown link type %x\n",
 +                               card->info.link_type);
 +                      card->info.link_info.speed = SPEED_UNKNOWN;
 +                      card->info.link_info.port = PORT_OTHER;
 +              }
 +
 +              card->info.link_info.link_mode = QETH_LINK_MODE_UNKNOWN;
 +      }
 +
 +      /* Get more accurate data via QUERY OAT: */
 +      if (qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
 +              struct qeth_link_info link_info;
 +              struct qeth_cmd_buffer *iob;
 +
 +              iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
 +                                         SETADP_DATA_SIZEOF(query_oat));
 +              if (iob) {
 +                      struct qeth_ipa_cmd *cmd = __ipa_cmd(iob);
 +                      struct qeth_query_oat *oat_req;
 +
 +                      oat_req = &cmd->data.setadapterparms.data.query_oat;
 +                      oat_req->subcmd_code = QETH_QOAT_SCOPE_INTERFACE;
 +
 +                      if (!qeth_send_ipa_cmd(card, iob,
 +                                             qeth_init_link_info_oat_cb,
 +                                             &link_info)) {
 +                              if (link_info.speed != SPEED_UNKNOWN)
 +                                      card->info.link_info.speed = link_info.speed;
 +                              if (link_info.duplex != DUPLEX_UNKNOWN)
 +                                      card->info.link_info.duplex = link_info.duplex;
 +                              if (link_info.port != PORT_OTHER)
 +                                      card->info.link_info.port = link_info.port;
 +                              if (link_info.link_mode != QETH_LINK_MODE_UNKNOWN)
 +                                      card->info.link_info.link_mode = link_info.link_mode;
 +                      }
 +              }
 +      }
  }
  
  /**
@@@ -5489,8 -5285,6 +5492,8 @@@ retriable
                        goto out;
        }
  
 +      qeth_init_link_info(card);
 +
        rc = qeth_init_qdio_queues(card);
        if (rc) {
                QETH_CARD_TEXT_(card, 2, "9err%d", rc);
@@@ -6078,9 -5872,32 +6081,32 @@@ static void qeth_iqd_tx_complete(struc
  
                if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
                                                   QETH_QDIO_BUF_PENDING) ==
-                   QETH_QDIO_BUF_PRIMED)
+                   QETH_QDIO_BUF_PRIMED) {
                        qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);
  
+                       /* Handle race with qeth_qdio_handle_aob(): */
+                       switch (atomic_xchg(&buffer->state,
+                                           QETH_QDIO_BUF_NEED_QAOB)) {
+                       case QETH_QDIO_BUF_PENDING:
+                               /* No concurrent QAOB notification. */
+                               break;
+                       case QETH_QDIO_BUF_QAOB_OK:
+                               qeth_notify_skbs(queue, buffer,
+                                                TX_NOTIFY_DELAYED_OK);
+                               atomic_set(&buffer->state,
+                                          QETH_QDIO_BUF_HANDLED_DELAYED);
+                               break;
+                       case QETH_QDIO_BUF_QAOB_ERROR:
+                               qeth_notify_skbs(queue, buffer,
+                                                TX_NOTIFY_DELAYED_GENERALERROR);
+                               atomic_set(&buffer->state,
+                                          QETH_QDIO_BUF_HANDLED_DELAYED);
+                               break;
+                       default:
+                               WARN_ON_ONCE(1);
+                       }
+               }
                QETH_CARD_TEXT_(card, 5, "pel%u", bidx);
  
                /* prepare the queue slot for re-use: */
index daeb837abec34ad9d93d60863d797d23ff87eb4a,79939ba5d523563a66b3f7ee79d8dc7071a9537e..393aef681e44802017cc6efb00cc868628f4b521
@@@ -737,6 -737,8 +737,6 @@@ static void qeth_l2_dev2br_an_set_cb(vo
   *
   *    On enable, emits a series of address notifications for all
   *    currently registered hosts.
 - *
 - *    Must be called under rtnl_lock
   */
  static int qeth_l2_dev2br_an_set(struct qeth_card *card, bool enable)
  {
@@@ -983,32 -985,19 +983,19 @@@ static void qeth_l2_setup_bridgeport_at
   *    change notification' and thus can support the learning_sync bridgeport
   *    attribute
   *    @card: qeth_card structure pointer
-  *
-  *    This is a destructive test and must be called before dev2br or
-  *    bridgeport address notification is enabled!
   */
  static void qeth_l2_detect_dev2br_support(struct qeth_card *card)
  {
        struct qeth_priv *priv = netdev_priv(card->dev);
        bool dev2br_supported;
-       int rc;
  
        QETH_CARD_TEXT(card, 2, "d2brsup");
        if (!IS_IQD(card))
                return;
  
        /* dev2br requires valid cssid,iid,chid */
-       if (!card->info.ids_valid) {
-               dev2br_supported = false;
-       } else if (css_general_characteristics.enarf) {
-               dev2br_supported = true;
-       } else {
-               /* Old machines don't have the feature bit:
-                * Probe by testing whether a disable succeeds
-                */
-               rc = qeth_l2_pnso(card, PNSO_OC_NET_ADDR_INFO, 0, NULL, NULL);
-               dev2br_supported = !rc;
-       }
+       dev2br_supported = card->info.ids_valid &&
+                          css_general_characteristics.enarf;
        QETH_CARD_TEXT_(card, 2, "D2Bsup%02x", dev2br_supported);
  
        if (dev2br_supported)
@@@ -1287,19 -1276,16 +1274,19 @@@ static void qeth_l2_dev2br_worker(struc
        if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
                goto free;
  
 -      /* Potential re-config in progress, try again later: */
 -      if (!rtnl_trylock()) {
 -              queue_delayed_work(card->event_wq, dwork,
 -                                 msecs_to_jiffies(100));
 -              return;
 -      }
 -      if (!netif_device_present(card->dev))
 -              goto out_unlock;
 -
        if (data->ac_event.lost_event_mask) {
 +              /* Potential re-config in progress, try again later: */
 +              if (!rtnl_trylock()) {
 +                      queue_delayed_work(card->event_wq, dwork,
 +                                         msecs_to_jiffies(100));
 +                      return;
 +              }
 +
 +              if (!netif_device_present(card->dev)) {
 +                      rtnl_unlock();
 +                      goto free;
 +              }
 +
                QETH_DBF_MESSAGE(3,
                                 "Address change notification overflow on device %x\n",
                                 CARD_DEVID(card));
                                         "Address Notification resynced on device %x\n",
                                         CARD_DEVID(card));
                }
 +
 +              rtnl_unlock();
        } else {
                for (i = 0; i < data->ac_event.num_entries; i++) {
                        struct qeth_ipacmd_addr_change_entry *entry =
                }
        }
  
 -out_unlock:
 -      rtnl_unlock();
 -
  free:
        kfree(data);
  }
@@@ -2233,7 -2220,6 +2220,6 @@@ static int qeth_l2_set_online(struct qe
        struct net_device *dev = card->dev;
        int rc = 0;
  
-       /* query before bridgeport_notification may be enabled */
        qeth_l2_detect_dev2br_support(card);
  
        mutex_lock(&card->sbp_lock);
@@@ -2310,8 -2296,11 +2296,8 @@@ static void qeth_l2_set_offline(struct 
                card->state = CARD_STATE_DOWN;
  
        qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
 -      if (priv->brport_features & BR_LEARNING_SYNC) {
 -              rtnl_lock();
 +      if (priv->brport_features & BR_LEARNING_SYNC)
                qeth_l2_dev2br_fdb_flush(card);
 -              rtnl_unlock();
 -      }
  }
  
  /* Returns zero if the command is successfully "consumed" */
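The worker rework above narrows the RTNL critical section while keeping the trylock-and-requeue idiom. A minimal sketch of that idiom, with a hypothetical worker body:

#include <linux/jiffies.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

static void my_worker(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	if (!rtnl_trylock()) {
		/* potential re-config in progress, try again later */
		schedule_delayed_work(dwork, msecs_to_jiffies(100));
		return;
	}

	/* ... work that needs RTNL ... */

	rtnl_unlock();
}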
index 0049e8fe49055e5e3634b93b654d62d0efa071c9,fa275a054f46221a933ee2097394ba13ebc439d0..8eeb73ac58bde7695faddb21f11e7dbfe8305af9
@@@ -34,6 -34,7 +34,6 @@@
  #include <linux/workqueue.h>
  #include <linux/dynamic_queue_limits.h>
  
 -#include <linux/ethtool.h>
  #include <net/net_namespace.h>
  #ifdef CONFIG_DCB
  #include <net/dcbnl.h>
@@@ -50,7 -51,6 +50,7 @@@
  
  struct netpoll_info;
  struct device;
 +struct ethtool_ops;
  struct phy_device;
  struct dsa_port;
  struct ip_tunnel_parm;
@@@ -1490,7 -1490,7 +1490,7 @@@ struct net_device_ops 
  };
  
  /**
 - * enum net_device_priv_flags - &struct net_device priv_flags
 + * enum netdev_priv_flags - &struct net_device priv_flags
   *
   * These are the &struct net_device, they are only set internally
   * by drivers and used in the kernel. These flags are invisible to
@@@ -2557,18 -2557,6 +2557,18 @@@ static inline void dev_sw_netstats_rx_a
        u64_stats_update_end(&tstats->syncp);
  }
  
 +static inline void dev_sw_netstats_tx_add(struct net_device *dev,
 +                                        unsigned int packets,
 +                                        unsigned int len)
 +{
 +      struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
 +
 +      u64_stats_update_begin(&tstats->syncp);
 +      tstats->tx_bytes += len;
 +      tstats->tx_packets += packets;
 +      u64_stats_update_end(&tstats->syncp);
 +}
 +
  static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
  {
        struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
  #define netdev_alloc_pcpu_stats(type)                                 \
        __netdev_alloc_pcpu_stats(type, GFP_KERNEL)
  
 +#define devm_netdev_alloc_pcpu_stats(dev, type)                               \
 +({                                                                    \
 +      typeof(type) __percpu *pcpu_stats = devm_alloc_percpu(dev, type);\
 +      if (pcpu_stats) {                                               \
 +              int __cpu;                                              \
 +              for_each_possible_cpu(__cpu) {                          \
 +                      typeof(type) *stat;                             \
 +                      stat = per_cpu_ptr(pcpu_stats, __cpu);          \
 +                      u64_stats_init(&stat->syncp);                   \
 +              }                                                       \
 +      }                                                               \
 +      pcpu_stats;                                                     \
 +})
 +
  enum netdev_lag_tx_type {
        NETDEV_LAG_TX_TYPE_UNKNOWN,
        NETDEV_LAG_TX_TYPE_RANDOM,
@@@ -3163,6 -3137,11 +3163,11 @@@ static inline bool dev_validate_header(
        return false;
  }
  
+ static inline bool dev_has_header(const struct net_device *dev)
+ {
+       return dev->header_ops && dev->header_ops->create;
+ }
  typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr,
                           int len, int size);
  int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
@@@ -3602,7 -3581,7 +3607,7 @@@ static inline void netif_stop_subqueue(
  }
  
  /**
 - *    netif_subqueue_stopped - test status of subqueue
 + *    __netif_subqueue_stopped - test status of subqueue
   *    @dev: network device
   *    @queue_index: sub queue index
   *
@@@ -3616,13 -3595,6 +3621,13 @@@ static inline bool __netif_subqueue_sto
        return netif_tx_queue_stopped(txq);
  }
  
 +/**
 + *    netif_subqueue_stopped - test status of subqueue
 + *    @dev: network device
 + *    @skb: sub queue buffer pointer
 + *
 + * Check individual transmit queue of a device with multiple transmit queues.
 + */
  static inline bool netif_subqueue_stopped(const struct net_device *dev,
                                          struct sk_buff *skb)
  {
@@@ -4534,7 -4506,6 +4539,7 @@@ void netdev_stats_to_stats64(struct rtn
                             const struct net_device_stats *netdev_stats);
  void dev_fetch_sw_netstats(struct rtnl_link_stats64 *s,
                           const struct pcpu_sw_netstats __percpu *netstats);
 +void dev_get_tstats64(struct net_device *dev, struct rtnl_link_stats64 *s);
  
  extern int            netdev_max_backlog;
  extern int            netdev_tstamp_prequeue;
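dev_sw_netstats_tx_add() above is the TX counterpart to the existing RX helper. A hedged sketch of how a driver's transmit path might use it; my_start_xmit is hypothetical, and dev->tstats is assumed to have been allocated, e.g. via the devm_netdev_alloc_pcpu_stats() macro also added here:

#include <linux/netdevice.h>

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* capture the length before the skb is handed off */
	unsigned int len = skb->len;

	/* ... queue skb to hardware here ... */

	/* count the frame with the helper introduced above */
	dev_sw_netstats_tx_add(dev, 1, len);
	return NETDEV_TX_OK;
}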
diff --combined include/net/tls.h
index b2637edc272675cfc7367a58a16aa97ac0dbd233,2bdd802212fe00325322ee0a8b16abd13f06c72b..3eccb525e8f7954c43cec1b8c4212c903a35e76e
@@@ -199,6 -199,12 +199,12 @@@ enum tls_context_flags 
         * to be atomic.
         */
        TLS_TX_SYNC_SCHED = 1,
+       /* tls_dev_del was called for the RX side, device state was released,
+        * but tls_ctx->netdev might still be kept, because TX-side driver
+        * resources might not be released yet. Used to prevent the second
+        * tls_dev_del call in tls_device_down if it happens simultaneously.
+        */
+       TLS_RX_DEV_CLOSED = 2,
  };
  
  struct cipher_context {
@@@ -211,7 -217,6 +217,7 @@@ union tls_crypto_context 
        union {
                struct tls12_crypto_info_aes_gcm_128 aes_gcm_128;
                struct tls12_crypto_info_aes_gcm_256 aes_gcm_256;
 +              struct tls12_crypto_info_chacha20_poly1305 chacha20_poly1305;
        };
  };
  
@@@ -502,33 -507,32 +508,33 @@@ static inline void tls_advance_record_s
        if (tls_bigint_increment(ctx->rec_seq, prot->rec_seq_size))
                tls_err_abort(sk, EBADMSG);
  
 -      if (prot->version != TLS_1_3_VERSION)
 -              tls_bigint_increment(ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
 +      if (prot->version != TLS_1_3_VERSION &&
 +          prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
 +              tls_bigint_increment(ctx->iv + prot->salt_size,
                                     prot->iv_size);
  }
  
  static inline void tls_fill_prepend(struct tls_context *ctx,
                             char *buf,
                             size_t plaintext_len,
 -                           unsigned char record_type,
 -                           int version)
 +                           unsigned char record_type)
  {
        struct tls_prot_info *prot = &ctx->prot_info;
        size_t pkt_len, iv_size = prot->iv_size;
  
        pkt_len = plaintext_len + prot->tag_size;
 -      if (version != TLS_1_3_VERSION) {
 +      if (prot->version != TLS_1_3_VERSION &&
 +          prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305) {
                pkt_len += iv_size;
  
                memcpy(buf + TLS_NONCE_OFFSET,
 -                     ctx->tx.iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE, iv_size);
 +                     ctx->tx.iv + prot->salt_size, iv_size);
        }
  
        /* we cover nonce explicit here as well, so buf should be of
         * size KTLS_DTLS_HEADER_SIZE + KTLS_DTLS_NONCE_EXPLICIT_SIZE
         */
 -      buf[0] = version == TLS_1_3_VERSION ?
 +      buf[0] = prot->version == TLS_1_3_VERSION ?
                   TLS_RECORD_TYPE_DATA : record_type;
        /* Note that VERSION must be TLS_1_2 for both TLS1.2 and TLS1.3 */
        buf[1] = TLS_1_2_VERSION_MINOR;
  static inline void tls_make_aad(char *buf,
                                size_t size,
                                char *record_sequence,
 -                              int record_sequence_size,
                                unsigned char record_type,
 -                              int version)
 +                              struct tls_prot_info *prot)
  {
 -      if (version != TLS_1_3_VERSION) {
 -              memcpy(buf, record_sequence, record_sequence_size);
 +      if (prot->version != TLS_1_3_VERSION) {
 +              memcpy(buf, record_sequence, prot->rec_seq_size);
                buf += 8;
        } else {
 -              size += TLS_CIPHER_AES_GCM_128_TAG_SIZE;
 +              size += prot->tag_size;
        }
  
 -      buf[0] = version == TLS_1_3_VERSION ?
 +      buf[0] = prot->version == TLS_1_3_VERSION ?
                  TLS_RECORD_TYPE_DATA : record_type;
        buf[1] = TLS_1_2_VERSION_MAJOR;
        buf[2] = TLS_1_2_VERSION_MINOR;
        buf[4] = size & 0xFF;
  }
  
 -static inline void xor_iv_with_seq(int version, char *iv, char *seq)
 +static inline void xor_iv_with_seq(struct tls_prot_info *prot, char *iv, char *seq)
  {
        int i;
  
 -      if (version == TLS_1_3_VERSION) {
 +      if (prot->version == TLS_1_3_VERSION ||
 +          prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305) {
                for (i = 0; i < 8; i++)
                        iv[i + 4] ^= seq[i];
        }
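xor_iv_with_seq() above now keys off the prot info, so ChaCha20-Poly1305 gets the TLS 1.3-style nonce. An illustrative restatement of that nonce construction (standalone helper, not in the patch):

#include <linux/types.h>

/* XOR the 8-byte record sequence into the last 8 bytes of the
 * 12-byte static IV, yielding the per-record nonce.
 */
static void build_nonce(u8 iv[12], const u8 seq[8])
{
	int i;

	for (i = 0; i < 8; i++)
		iv[4 + i] ^= seq[i];
}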
diff --combined net/can/af_can.c
index 963bd714551791a3aa3515d8c67a61ac00b3b4e8,4c343b43067f64d9fe5d62840216fdd09c3b1dc6..837bb8af0ec307c212c985a39c63d4e1cb407b34
@@@ -541,10 -541,13 +541,13 @@@ void can_rx_unregister(struct net *net
  
        /* Check for bugs in CAN protocol implementations using af_can.c:
         * 'rcv' will be NULL if no matching list item was found for removal.
+        * As this case may happen when closing a socket while the
+        * notifier for removing the CAN netdev is running, we just
+        * print a warning here.
         */
        if (!rcv) {
-               WARN(1, "BUG: receive list entry not found for dev %s, id %03X, mask %03X\n",
-                    DNAME(dev), can_id, mask);
+               pr_warn("can: receive list entry not found for dev %s, id %03X, mask %03X\n",
+                       DNAME(dev), can_id, mask);
                goto out;
        }
  
@@@ -888,7 -891,7 +891,7 @@@ static __init int can_init(void
        int err;
  
        /* check for correct padding to be able to use the structs similarly */
 -      BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
 +      BUILD_BUG_ON(offsetof(struct can_frame, len) !=
                     offsetof(struct canfd_frame, len) ||
                     offsetof(struct can_frame, data) !=
                     offsetof(struct canfd_frame, data));
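can_rx_unregister() above downgrades a reachable race from a WARN() backtrace to a plain pr_warn(). A minimal sketch of the same choice in a hypothetical module:

#include <linux/printk.h>

static void unregister_entry(void *entry, const char *name)
{
	if (!entry) {
		/* can legitimately race with netdev removal: warn, no splat */
		pr_warn("mymod: receive entry not found for dev %s\n", name);
		return;
	}
	/* ... actual removal ... */
}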
diff --combined net/core/devlink.c
index 7c05e8603bffff5692c7ee38ce59cbd5cff79255,8c5ddffd707defd66da8c7b6cda530c61a37196d..88c0ac8ed444a57729f7a118fbf7ac9874c9a036
@@@ -517,7 -517,7 +517,7 @@@ devlink_reload_limit_is_supported(struc
        return test_bit(limit, &devlink->ops->reload_limits);
  }
  
- static int devlink_reload_stat_put(struct sk_buff *msg, enum devlink_reload_action action,
+ static int devlink_reload_stat_put(struct sk_buff *msg,
                                   enum devlink_reload_limit limit, u32 value)
  {
        struct nlattr *reload_stats_entry;
        if (!reload_stats_entry)
                return -EMSGSIZE;
  
-       if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, action) ||
-           nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
+       if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_STATS_LIMIT, limit) ||
            nla_put_u32(msg, DEVLINK_ATTR_RELOAD_STATS_VALUE, value))
                goto nla_put_failure;
        nla_nest_end(msg, reload_stats_entry);
@@@ -540,7 -539,7 +539,7 @@@ nla_put_failure
  
  static int devlink_reload_stats_put(struct sk_buff *msg, struct devlink *devlink, bool is_remote)
  {
-       struct nlattr *reload_stats_attr;
+       struct nlattr *reload_stats_attr, *act_info, *act_stats;
        int i, j, stat_idx;
        u32 value;
  
        if (!reload_stats_attr)
                return -EMSGSIZE;
  
-       for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
-               /* Remote stats are shown even if not locally supported. Stats
-                * of actions with unspecified limit are shown though drivers
-                * don't need to register unspecified limit.
-                */
-               if (!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
-                   !devlink_reload_limit_is_supported(devlink, j))
+       for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
+               if ((!is_remote &&
+                    !devlink_reload_action_is_supported(devlink, i)) ||
+                   i == DEVLINK_RELOAD_ACTION_UNSPEC)
                        continue;
-               for (i = 0; i <= DEVLINK_RELOAD_ACTION_MAX; i++) {
-                       if ((!is_remote && !devlink_reload_action_is_supported(devlink, i)) ||
-                           i == DEVLINK_RELOAD_ACTION_UNSPEC ||
+               act_info = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_INFO);
+               if (!act_info)
+                       goto nla_put_failure;
+               if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_ACTION, i))
+                       goto action_info_nest_cancel;
+               act_stats = nla_nest_start(msg, DEVLINK_ATTR_RELOAD_ACTION_STATS);
+               if (!act_stats)
+                       goto action_info_nest_cancel;
+               for (j = 0; j <= DEVLINK_RELOAD_LIMIT_MAX; j++) {
+                       /* Remote stats are shown even if not locally supported.
+                        * Stats of actions with unspecified limit are shown
+                        * though drivers don't need to register unspecified
+                        * limit.
+                        */
+                       if ((!is_remote && j != DEVLINK_RELOAD_LIMIT_UNSPEC &&
+                            !devlink_reload_limit_is_supported(devlink, j)) ||
                            devlink_reload_combination_is_invalid(i, j))
                                continue;
  
                                value = devlink->stats.reload_stats[stat_idx];
                        else
                                value = devlink->stats.remote_reload_stats[stat_idx];
-                       if (devlink_reload_stat_put(msg, i, j, value))
-                               goto nla_put_failure;
+                       if (devlink_reload_stat_put(msg, j, value))
+                               goto action_stats_nest_cancel;
                }
+               nla_nest_end(msg, act_stats);
+               nla_nest_end(msg, act_info);
        }
        nla_nest_end(msg, reload_stats_attr);
        return 0;
  
+ action_stats_nest_cancel:
+       nla_nest_cancel(msg, act_stats);
+ action_info_nest_cancel:
+       nla_nest_cancel(msg, act_info);
  nla_put_failure:
        nla_nest_cancel(msg, reload_stats_attr);
        return -EMSGSIZE;
@@@ -755,6 -772,8 +772,8 @@@ static int devlink_nl_port_fill(struct 
        if (nla_put_u32(msg, DEVLINK_ATTR_PORT_INDEX, devlink_port->index))
                goto nla_put_failure;
  
+       /* Hold rtnl lock while accessing port's netdev attributes. */
+       rtnl_lock();
        spin_lock_bh(&devlink_port->type_lock);
        if (nla_put_u16(msg, DEVLINK_ATTR_PORT_TYPE, devlink_port->type))
                goto nla_put_failure_type_locked;
                        devlink_port->desired_type))
                goto nla_put_failure_type_locked;
        if (devlink_port->type == DEVLINK_PORT_TYPE_ETH) {
+               struct net *net = devlink_net(devlink_port->devlink);
                struct net_device *netdev = devlink_port->type_dev;
  
-               if (netdev &&
+               if (netdev && net_eq(net, dev_net(netdev)) &&
                    (nla_put_u32(msg, DEVLINK_ATTR_PORT_NETDEV_IFINDEX,
                                 netdev->ifindex) ||
                     nla_put_string(msg, DEVLINK_ATTR_PORT_NETDEV_NAME,
                        goto nla_put_failure_type_locked;
        }
        spin_unlock_bh(&devlink_port->type_lock);
+       rtnl_unlock();
        if (devlink_nl_port_attrs_put(msg, devlink_port))
                goto nla_put_failure;
        if (devlink_nl_port_function_attrs_put(msg, devlink_port, extack))
  
  nla_put_failure_type_locked:
        spin_unlock_bh(&devlink_port->type_lock);
+       rtnl_unlock();
  nla_put_failure:
        genlmsg_cancel(msg, hdr);
        return -EMSGSIZE;
@@@ -3372,7 -3394,7 +3394,7 @@@ out_free_msg
        nlmsg_free(msg);
  }
  
 -void devlink_flash_update_begin_notify(struct devlink *devlink)
 +static void devlink_flash_update_begin_notify(struct devlink *devlink)
  {
        struct devlink_flash_notify params = { 0 };
  
                                      DEVLINK_CMD_FLASH_UPDATE,
                                      &params);
  }
 -EXPORT_SYMBOL_GPL(devlink_flash_update_begin_notify);
  
 -void devlink_flash_update_end_notify(struct devlink *devlink)
 +static void devlink_flash_update_end_notify(struct devlink *devlink)
  {
        struct devlink_flash_notify params = { 0 };
  
                                      DEVLINK_CMD_FLASH_UPDATE_END,
                                      &params);
  }
 -EXPORT_SYMBOL_GPL(devlink_flash_update_end_notify);
  
  void devlink_flash_update_status_notify(struct devlink *devlink,
                                        const char *status_msg,
@@@ -3429,12 -3453,10 +3451,12 @@@ EXPORT_SYMBOL_GPL(devlink_flash_update_
  static int devlink_nl_cmd_flash_update(struct sk_buff *skb,
                                       struct genl_info *info)
  {
 -      struct nlattr *nla_component, *nla_overwrite_mask;
 +      struct nlattr *nla_component, *nla_overwrite_mask, *nla_file_name;
        struct devlink_flash_update_params params = {};
        struct devlink *devlink = info->user_ptr[0];
 +      const char *file_name;
        u32 supported_params;
 +      int ret;
  
        if (!devlink->ops->flash_update)
                return -EOPNOTSUPP;
  
        supported_params = devlink->ops->supported_flash_update_params;
  
 -      params.file_name = nla_data(info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME]);
 -
        nla_component = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_COMPONENT];
        if (nla_component) {
                if (!(supported_params & DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT)) {
                params.overwrite_mask = sections.value & sections.selector;
        }
  
 -      return devlink->ops->flash_update(devlink, &params, info->extack);
 +      nla_file_name = info->attrs[DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME];
 +      file_name = nla_data(nla_file_name);
 +      ret = request_firmware(&params.fw, file_name, devlink->dev);
 +      if (ret) {
 +              NL_SET_ERR_MSG_ATTR(info->extack, nla_file_name, "failed to locate the requested firmware file");
 +              return ret;
 +      }
 +
 +      devlink_flash_update_begin_notify(devlink);
 +      ret = devlink->ops->flash_update(devlink, &params, info->extack);
 +      devlink_flash_update_end_notify(devlink);
 +
 +      release_firmware(params.fw);
 +
 +      return ret;
  }
  
  static const struct devlink_param devlink_param_generic[] = {
@@@ -9490,7 -9500,6 +9512,7 @@@ static const struct devlink_trap devlin
        DEVLINK_TRAP(DCCP_PARSING, DROP),
        DEVLINK_TRAP(GTP_PARSING, DROP),
        DEVLINK_TRAP(ESP_PARSING, DROP),
 +      DEVLINK_TRAP(BLACKHOLE_NEXTHOP, DROP),
  };
  
  #define DEVLINK_TRAP_GROUP(_id)                                                     \
@@@ -10240,18 -10249,12 +10262,18 @@@ int devlink_compat_flash_update(struct 
                goto out;
        }
  
 -      params.file_name = file_name;
 +      ret = request_firmware(&params.fw, file_name, devlink->dev);
 +      if (ret)
 +              goto out;
  
        mutex_lock(&devlink->lock);
 +      devlink_flash_update_begin_notify(devlink);
        ret = devlink->ops->flash_update(devlink, &params, NULL);
 +      devlink_flash_update_end_notify(devlink);
        mutex_unlock(&devlink->lock);
  
 +      release_firmware(params.fw);
 +
  out:
        rtnl_lock();
        dev_put(dev);
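The reload-stats rework above nests per-action attributes with nla_nest_start()/nla_nest_end() and unwinds with nla_nest_cancel() on error. A minimal sketch of that discipline; the MY_ATTR_* types are made up:

#include <net/netlink.h>

enum { MY_ATTR_INFO = 1, MY_ATTR_VALUE = 2 };	/* hypothetical */

static int put_nested_stat(struct sk_buff *msg, u8 value)
{
	struct nlattr *nest;

	nest = nla_nest_start(msg, MY_ATTR_INFO);
	if (!nest)
		return -EMSGSIZE;

	if (nla_put_u8(msg, MY_ATTR_VALUE, value))
		goto cancel;

	nla_nest_end(msg, nest);
	return 0;

cancel:
	/* drop the partially built nest so the message stays valid */
	nla_nest_cancel(msg, nest);
	return -EMSGSIZE;
}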
diff --combined net/core/skbuff.c
index effa19da8681e3729e3a4031be83b26b1518abfb,06c526e0d810e4cd948c4d7954d8b5283345e370..ad98265f1dd165998dae9338d90e25f6752f5044
@@@ -249,9 -249,6 +249,9 @@@ struct sk_buff *__alloc_skb(unsigned in
  
                fclones->skb2.fclone = SKB_FCLONE_CLONE;
        }
 +
 +      skb_set_kcov_handle(skb, kcov_common_handle());
 +
  out:
        return skb;
  nodata:
@@@ -285,8 -282,6 +285,8 @@@ static struct sk_buff *__build_skb_arou
        memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
        atomic_set(&shinfo->dataref, 1);
  
 +      skb_set_kcov_handle(skb, kcov_common_handle());
 +
        return skb;
  }
  
@@@ -842,7 -837,7 +842,7 @@@ EXPORT_SYMBOL(consume_skb)
  #endif
  
  /**
 - *    consume_stateless_skb - free an skbuff, assuming it is stateless
 + *    __consume_stateless_skb - free an skbuff, assuming it is stateless
   *    @skb: buffer to free
   *
   *    Alike consume_skb(), but this variant assumes that this is the last
@@@ -902,8 -897,6 +902,8 @@@ void napi_consume_skb(struct sk_buff *s
                return;
        }
  
 +      lockdep_assert_in_softirq();
 +
        if (!skb_unref(skb))
                return;
  
@@@ -4210,9 -4203,6 +4210,9 @@@ static const u8 skb_ext_type_len[] = 
  #if IS_ENABLED(CONFIG_MPTCP)
        [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
  #endif
 +#if IS_ENABLED(CONFIG_KCOV)
 +      [SKB_EXT_KCOV_HANDLE] = SKB_EXT_CHUNKSIZEOF(u64),
 +#endif
  };
  
  static __always_inline unsigned int skb_ext_total_length(void)
  #endif
  #if IS_ENABLED(CONFIG_MPTCP)
                skb_ext_type_len[SKB_EXT_MPTCP] +
 +#endif
 +#if IS_ENABLED(CONFIG_KCOV)
 +              skb_ext_type_len[SKB_EXT_KCOV_HANDLE] +
  #endif
                0;
  }
@@@ -4562,7 -4549,7 +4562,7 @@@ struct sk_buff *sock_dequeue_err_skb(st
        if (skb && (skb_next = skb_peek(q))) {
                icmp_next = is_icmp_err_skb(skb_next);
                if (icmp_next)
-                       sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
+                       sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
        }
        spin_unlock_irqrestore(&q->lock, flags);
  
@@@ -5443,8 -5430,7 +5443,8 @@@ struct sk_buff *skb_vlan_untag(struct s
                goto err_free;
  
        skb_reset_network_header(skb);
 -      skb_reset_transport_header(skb);
 +      if (!skb_transport_header_was_set(skb))
 +              skb_reset_transport_header(skb);
        skb_reset_mac_len(skb);
  
        return skb;
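The skb_vlan_untag() hunk above preserves a transport header that an earlier layer already set. Restated as a hedged standalone helper:

#include <linux/skbuff.h>

static void fix_headers(struct sk_buff *skb)
{
	skb_reset_network_header(skb);
	/* keep a transport header set by an earlier layer */
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}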
diff --combined net/ipv4/tcp_ipv4.c
index c2d5132c523c8d4258e407c4cc5112840b07805e,8391aa29e7a41ecb06c3feed13e038b827a7ab11..e4b31e70bd301cd07f520d07684e5e5f94b18f74
@@@ -980,17 -980,22 +980,22 @@@ static int tcp_v4_send_synack(const str
  
        skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
  
-       tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
-                       tcp_rsk(req)->syn_tos : inet_sk(sk)->tos;
        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
  
+               tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
+                               tcp_rsk(req)->syn_tos & ~INET_ECN_MASK :
+                               inet_sk(sk)->tos;
+               if (!INET_ECN_is_capable(tos) &&
+                   tcp_bpf_ca_needs_ecn((struct sock *)req))
+                       tos |= INET_ECN_ECT_0;
                rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            rcu_dereference(ireq->ireq_opt),
-                                           tos & ~INET_ECN_MASK);
+                                           tos);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }
@@@ -1498,6 -1503,7 +1503,7 @@@ struct sock *tcp_v4_syn_recv_sock(cons
                                  bool *own_req)
  {
        struct inet_request_sock *ireq;
+       bool found_dup_sk = false;
        struct inet_sock *newinet;
        struct tcp_sock *newtp;
        struct sock *newsk;
  
        if (__inet_inherit_port(sk, newsk) < 0)
                goto put_and_exit;
-       *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
+       *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
+                                      &found_dup_sk);
        if (likely(*own_req)) {
                tcp_move_syn(newtp, req);
                ireq->ireq_opt = NULL;
        } else {
-               newinet->inet_opt = NULL;
+               if (!req_unhash && found_dup_sk) {
+                       /* This code path should be executed only in the
+                        * syncookie case
+                        */
+                       bh_unlock_sock(newsk);
+                       sock_put(newsk);
+                       newsk = NULL;
+               } else {
+                       newinet->inet_opt = NULL;
+               }
        }
        return newsk;
  
@@@ -2740,20 -2756,6 +2756,20 @@@ void tcp4_proc_exit(void
  }
  #endif /* CONFIG_PROC_FS */
  
 +/* @wake is one when sk_stream_write_space() calls us.
 + * This sends EPOLLOUT only if notsent_bytes is half the limit.
 + * This mimics the strategy used in sock_def_write_space().
 + */
 +bool tcp_stream_memory_free(const struct sock *sk, int wake)
 +{
 +      const struct tcp_sock *tp = tcp_sk(sk);
 +      u32 notsent_bytes = READ_ONCE(tp->write_seq) -
 +                          READ_ONCE(tp->snd_nxt);
 +
 +      return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
 +}
 +EXPORT_SYMBOL(tcp_stream_memory_free);
 +
  struct proto tcp_prot = {
        .name                   = "TCP",
        .owner                  = THIS_MODULE,
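tcp_stream_memory_free() above gates EPOLLOUT on notsent_lowat; with @wake == 1 the byte count is doubled, so writers wake only once unsent data drops below half the threshold. A hypothetical restatement of just that check:

#include <linux/types.h>

static bool stream_memory_free(u32 notsent_bytes, u32 lowat, int wake)
{
	/* wake == 1 halves the effective threshold */
	return (notsent_bytes << wake) < lowat;
}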
diff --combined net/mptcp/subflow.c
index 4d8abff1be1830adef97534de701829d49b3939b,953906e407428a6432bc35a945dade5047ee1bfd..2e5c3f4da3a4bf36db0110758e6fe4fad58c560b
@@@ -543,9 -543,8 +543,8 @@@ create_msk
                        fallback = true;
        } else if (subflow_req->mp_join) {
                mptcp_get_options(skb, &mp_opt);
-               if (!mp_opt.mp_join ||
-                   !mptcp_can_accept_new_subflow(subflow_req->msk) ||
-                   !subflow_hmac_valid(req, &mp_opt)) {
+               if (!mp_opt.mp_join || !subflow_hmac_valid(req, &mp_opt) ||
+                   !mptcp_can_accept_new_subflow(subflow_req->msk)) {
                        SUBFLOW_REQ_INC_STATS(req, MPTCP_MIB_JOINACKMAC);
                        fallback = true;
                }
@@@ -578,10 -577,6 +577,10 @@@ create_child
                         */
                        inet_sk_state_store((void *)new_msk, TCP_ESTABLISHED);
  
 +                      /* link the newly created socket to the msk */
 +                      mptcp_add_pending_subflow(mptcp_sk(new_msk), ctx);
 +                      WRITE_ONCE(mptcp_sk(new_msk)->first, child);
 +
                        /* new mpc subflow takes ownership of the newly
                         * created mptcp socket
                         */
@@@ -850,6 -845,8 +849,6 @@@ static void mptcp_subflow_discard_data(
                sk_eat_skb(ssk, skb);
        if (mptcp_subflow_get_map_offset(subflow) >= subflow->map_data_len)
                subflow->map_valid = 0;
 -      if (incr)
 -              tcp_cleanup_rbuf(ssk, incr);
  }
  
  static bool subflow_check_data_avail(struct sock *ssk)
@@@ -971,7 -968,7 +970,7 @@@ void mptcp_space(const struct sock *ssk
        const struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        const struct sock *sk = subflow->conn;
  
 -      *space = tcp_space(sk);
 +      *space = __mptcp_space(sk);
        *full_space = tcp_full_space(sk);
  }
  
@@@ -999,16 -996,17 +998,16 @@@ static void subflow_data_ready(struct s
  static void subflow_write_space(struct sock *sk)
  {
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
 +      struct socket *sock = READ_ONCE(sk->sk_socket);
        struct sock *parent = subflow->conn;
  
        if (!sk_stream_is_writeable(sk))
                return;
  
 -      if (sk_stream_is_writeable(parent)) {
 -              set_bit(MPTCP_SEND_SPACE, &mptcp_sk(parent)->flags);
 -              smp_mb__after_atomic();
 -              /* set SEND_SPACE before sk_stream_write_space clears NOSPACE */
 -              sk_stream_write_space(parent);
 -      }
 +      if (sock && sk_stream_is_writeable(parent))
 +              clear_bit(SOCK_NOSPACE, &sock->flags);
 +
 +      sk_stream_write_space(parent);
  }
  
  static struct inet_connection_sock_af_ops *
@@@ -1126,11 -1124,13 +1125,11 @@@ int __mptcp_subflow_connect(struct soc
        if (err && err != -EINPROGRESS)
                goto failed;
  
 -      spin_lock_bh(&msk->join_list_lock);
 -      list_add_tail(&subflow->node, &msk->join_list);
 -      spin_unlock_bh(&msk->join_list_lock);
 -
 +      mptcp_add_pending_subflow(msk, subflow);
        return err;
  
  failed:
 +      subflow->disposable = 1;
        sock_release(sf);
        return err;
  }
@@@ -1253,6 -1253,7 +1252,6 @@@ static void subflow_state_change(struc
                mptcp_data_ready(parent, sk);
  
        if (__mptcp_check_fallback(mptcp_sk(parent)) &&
 -          !(parent->sk_shutdown & RCV_SHUTDOWN) &&
            !subflow->rx_eof && subflow_is_done(sk)) {
                subflow->rx_eof = 1;
                mptcp_subflow_eof(parent);
        return err;
  }
  
 -static void subflow_ulp_release(struct sock *sk)
 +static void subflow_ulp_release(struct sock *ssk)
  {
 -      struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(sk);
 +      struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk);
 +      bool release = true;
 +      struct sock *sk;
  
        if (!ctx)
                return;
  
 -      if (ctx->conn)
 -              sock_put(ctx->conn);
 +      sk = ctx->conn;
 +      if (sk) {
 +              /* if the msk has been orphaned, keep the ctx
 +               * alive, will be freed by mptcp_done()
 +               */
 +              release = ctx->disposable;
 +              sock_put(sk);
 +      }
  
 -      kfree_rcu(ctx, rcu);
 +      if (release)
 +              kfree_rcu(ctx, rcu);
  }
  
  static void subflow_ulp_clone(const struct request_sock *req,
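subflow_ulp_release() above defers freeing when the context has been handed to the msk. A sketch of that conditional-release pattern, with hypothetical types:

#include <linux/rcupdate.h>
#include <net/sock.h>

struct my_ctx {
	struct sock *conn;
	bool disposable;
	struct rcu_head rcu;
};

static void my_ctx_release(struct my_ctx *ctx)
{
	bool release = true;

	if (ctx->conn) {
		/* owner frees the ctx later unless marked disposable */
		release = ctx->disposable;
		sock_put(ctx->conn);
	}
	if (release)
		kfree_rcu(ctx, rcu);
}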
diff --combined net/packet/af_packet.c
index 48a0ed836b463aa34b89c4718daa9653e4e7d5d0,7a18ffff855140e07dd64565f480dc0e8b95bd54..a667b19eab7870d599b13b1d3b7a9287ba8d1ee4
@@@ -46,7 -46,6 +46,7 @@@
   *                                    Copyright (C) 2011, <[email protected]>
   */
  
 +#include <linux/ethtool.h>
  #include <linux/types.h>
  #include <linux/mm.h>
  #include <linux/capability.h>
@@@ -94,8 -93,8 +94,8 @@@
  
  /*
     Assumptions:
-    - If the device has no dev->header_ops, there is no LL header visible
-      above the device. In this case, its hard_header_len should be 0.
+    - If the device has no dev->header_ops->create, there is no LL header
+      visible above the device. In this case, its hard_header_len should be 0.
       The device may prepend its own header internally. In this case, its
       needed_headroom should be set to the space needed for it to add its
       internal header.
  On receive:
  -----------
  
- Incoming, dev->header_ops != NULL
+ Incoming, dev_has_header(dev) == true
     mac_header -> ll header
     data       -> data
  
- Outgoing, dev->header_ops != NULL
+ Outgoing, dev_has_header(dev) == true
     mac_header -> ll header
     data       -> ll header
  
- Incoming, dev->header_ops == NULL
+ Incoming, dev_has_header(dev) == false
     mac_header -> data
       However drivers often make it point to the ll header.
       This is incorrect because the ll header should be invisible to us.
     data       -> data
  
- Outgoing, dev->header_ops == NULL
+ Outgoing, dev_has_header(dev) == false
     mac_header -> data. ll header is invisible to us.
     data       -> data
  
  Resume
-   If dev->header_ops == NULL we are unable to restore the ll header,
+   If dev_has_header(dev) == false we are unable to restore the ll header,
      because it is invisible to us.
  
  
@@@ -1637,15 -1636,13 +1637,15 @@@ static bool fanout_find_new_id(struct s
        return false;
  }
  
 -static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 +static int fanout_add(struct sock *sk, struct fanout_args *args)
  {
        struct packet_rollover *rollover = NULL;
        struct packet_sock *po = pkt_sk(sk);
 +      u16 type_flags = args->type_flags;
        struct packet_fanout *f, *match;
        u8 type = type_flags & 0xff;
        u8 flags = type_flags >> 8;
 +      u16 id = args->id;
        int err;
  
        switch (type) {
                }
        }
        err = -EINVAL;
 -      if (match && match->flags != flags)
 -              goto out;
 -      if (!match) {
 +      if (match) {
 +              if (match->flags != flags)
 +                      goto out;
 +              if (args->max_num_members &&
 +                  args->max_num_members != match->max_num_members)
 +                      goto out;
 +      } else {
 +              if (args->max_num_members > PACKET_FANOUT_MAX)
 +                      goto out;
 +              if (!args->max_num_members)
 +                      /* legacy PACKET_FANOUT_MAX */
 +                      args->max_num_members = 256;
                err = -ENOMEM;
 -              match = kzalloc(sizeof(*match), GFP_KERNEL);
 +              match = kvzalloc(struct_size(match, arr, args->max_num_members),
 +                               GFP_KERNEL);
                if (!match)
                        goto out;
                write_pnet(&match->net, sock_net(sk));
                match->prot_hook.func = packet_rcv_fanout;
                match->prot_hook.af_packet_priv = match;
                match->prot_hook.id_match = match_fanout_group;
 +              match->max_num_members = args->max_num_members;
                list_add(&match->list, &fanout_list);
        }
        err = -EINVAL;
            match->prot_hook.type == po->prot_hook.type &&
            match->prot_hook.dev == po->prot_hook.dev) {
                err = -ENOSPC;
 -              if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 +              if (refcount_read(&match->sk_ref) < match->max_num_members) {
                        __dev_remove_pack(&po->prot_hook);
                        po->fanout = match;
                        po->rollover = rollover;
  
        if (err && !refcount_read(&match->sk_ref)) {
                list_del(&match->list);
 -              kfree(match);
 +              kvfree(match);
        }
  
  out:
@@@ -2083,7 -2069,7 +2083,7 @@@ static int packet_rcv(struct sk_buff *s
  
        skb->dev = dev;
  
-       if (dev->header_ops) {
+       if (dev_has_header(dev)) {
                /* The device has an explicit notion of ll header,
                 * exported to higher levels.
                 *
@@@ -2212,7 -2198,7 +2212,7 @@@ static int tpacket_rcv(struct sk_buff *
        if (!net_eq(dev_net(dev), sock_net(sk)))
                goto drop;
  
-       if (dev->header_ops) {
+       if (dev_has_header(dev)) {
                if (sk->sk_type != SOCK_DGRAM)
                        skb_push(skb, skb->data - skb_mac_header(skb));
                else if (skb->pkt_type == PACKET_OUTGOING) {
@@@ -3089,7 -3075,7 +3089,7 @@@ static int packet_release(struct socke
        kfree(po->rollover);
        if (f) {
                fanout_release_data(f);
 -              kfree(f);
 +              kvfree(f);
        }
        /*
         *      Now the socket is dead. No more input will appear.
@@@ -3880,14 -3866,14 +3880,14 @@@ packet_setsockopt(struct socket *sock, 
        }
        case PACKET_FANOUT:
        {
 -              int val;
 +              struct fanout_args args = { 0 };
  
 -              if (optlen != sizeof(val))
 +              if (optlen != sizeof(int) && optlen != sizeof(args))
                        return -EINVAL;
 -              if (copy_from_sockptr(&val, optval, sizeof(val)))
 +              if (copy_from_sockptr(&args, optval, optlen))
                        return -EFAULT;
  
 -              return fanout_add(sk, val & 0xffff, val >> 16);
 +              return fanout_add(sk, &args);
        }
        case PACKET_FANOUT_DATA:
        {
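fanout_add() above turns the member array into a runtime-sized flexible member, allocated with kvzalloc(struct_size(...)) and freed with kvfree(). A minimal sketch (types hypothetical):

#include <linux/mm.h>
#include <linux/overflow.h>

struct fan_group {
	unsigned int max_num_members;
	void *arr[];		/* flexible array, sized at alloc time */
};

static struct fan_group *fan_group_alloc(unsigned int n)
{
	struct fan_group *g;

	/* struct_size() guards the header + n * element computation */
	g = kvzalloc(struct_size(g, arr, n), GFP_KERNEL);
	if (g)
		g->max_num_members = n;
	return g;		/* release with kvfree(g) */
}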
diff --combined net/tls/tls_device.c
index 6f93ad5b7200a3c92bcd7906890a7186b47b6f32,a3ab2d3d4e4eac56426b23d87d83fafb18c4c0d7..6cc9fe77835613e7a070a0b20d6bc1240d2bc1c5
@@@ -327,7 -327,7 +327,7 @@@ static int tls_device_record_close(stru
        /* fill prepend */
        tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),
                         record->len - prot->overhead_size,
 -                       record_type, prot->version);
 +                       record_type);
        return ret;
  }
  
@@@ -1262,6 -1262,8 +1262,8 @@@ void tls_device_offload_cleanup_rx(stru
        if (tls_ctx->tx_conf != TLS_HW) {
                dev_put(netdev);
                tls_ctx->netdev = NULL;
+       } else {
+               set_bit(TLS_RX_DEV_CLOSED, &tls_ctx->flags);
        }
  out:
        up_read(&device_offload_lock);
@@@ -1291,7 -1293,8 +1293,8 @@@ static int tls_device_down(struct net_d
                if (ctx->tx_conf == TLS_HW)
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_TX);
-               if (ctx->rx_conf == TLS_HW)
+               if (ctx->rx_conf == TLS_HW &&
+                   !test_bit(TLS_RX_DEV_CLOSED, &ctx->flags))
                        netdev->tlsdev_ops->tls_dev_del(netdev, ctx,
                                                        TLS_OFFLOAD_CTX_DIR_RX);
                WRITE_ONCE(ctx->netdev, NULL);
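TLS_RX_DEV_CLOSED above keeps tls_dev_del() from running twice when RX teardown and device unregister race. A sketch of the same guard using the atomic test_and_set_bit() form of the separate set_bit()/test_bit() pair (names hypothetical):

#include <linux/bitops.h>

#define MY_RX_CLOSED	0	/* hypothetical flag bit */

struct my_tls_ctx {
	unsigned long flags;
};

static void rx_teardown_once(struct my_tls_ctx *ctx)
{
	if (test_and_set_bit(MY_RX_CLOSED, &ctx->flags))
		return;			/* already released */
	/* ... release RX device state here ... */
}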
diff --combined net/tls/tls_sw.c
index 53106f02def2deaa1c85d2a91d557ef21b63bce4,845c628ac1b2717f6c9a4ef855c64924f0c847a0..01d933ae5f164ef3343bc2a5813bb4d13b065c38
@@@ -505,7 -505,7 +505,7 @@@ static int tls_do_encryption(struct soc
        memcpy(&rec->iv_data[iv_offset], tls_ctx->tx.iv,
               prot->iv_size + prot->salt_size);
  
 -      xor_iv_with_seq(prot->version, rec->iv_data, tls_ctx->tx.rec_seq);
 +      xor_iv_with_seq(prot, rec->iv_data, tls_ctx->tx.rec_seq);
  
        sge->offset += prot->prepend_size;
        sge->length -= prot->prepend_size;
@@@ -748,13 -748,14 +748,13 @@@ static int tls_push_record(struct sock 
        sg_chain(rec->sg_aead_out, 2, &msg_en->sg.data[i]);
  
        tls_make_aad(rec->aad_space, msg_pl->sg.size + prot->tail_size,
 -                   tls_ctx->tx.rec_seq, prot->rec_seq_size,
 -                   record_type, prot->version);
 +                   tls_ctx->tx.rec_seq, record_type, prot);
  
        tls_fill_prepend(tls_ctx,
                         page_address(sg_page(&msg_en->sg.data[i])) +
                         msg_en->sg.data[i].offset,
                         msg_pl->sg.size + prot->tail_size,
 -                       record_type, prot->version);
 +                       record_type);
  
        tls_ctx->pending_open_record_frags = false;
  
@@@ -1294,6 -1295,12 +1294,12 @@@ static struct sk_buff *tls_wait_data(st
                        return NULL;
                }
  
+               if (!skb_queue_empty(&sk->sk_receive_queue)) {
+                       __strp_unpause(&ctx->strp);
+                       if (ctx->recv_pkt)
+                               return ctx->recv_pkt;
+               }
                if (sk->sk_shutdown & RCV_SHUTDOWN)
                        return NULL;
  
@@@ -1464,19 -1471,19 +1470,19 @@@ static int decrypt_internal(struct soc
                kfree(mem);
                return err;
        }
 -      if (prot->version == TLS_1_3_VERSION)
 +      if (prot->version == TLS_1_3_VERSION ||
 +          prot->cipher_type == TLS_CIPHER_CHACHA20_POLY1305)
                memcpy(iv + iv_offset, tls_ctx->rx.iv,
                       crypto_aead_ivsize(ctx->aead_recv));
        else
                memcpy(iv + iv_offset, tls_ctx->rx.iv, prot->salt_size);
  
 -      xor_iv_with_seq(prot->version, iv, tls_ctx->rx.rec_seq);
 +      xor_iv_with_seq(prot, iv, tls_ctx->rx.rec_seq);
  
        /* Prepare AAD */
        tls_make_aad(aad, rxm->full_len - prot->overhead_size +
                     prot->tail_size,
 -                   tls_ctx->rx.rec_seq, prot->rec_seq_size,
 -                   ctx->control, prot->version);
 +                   tls_ctx->rx.rec_seq, ctx->control, prot);
  
        /* Prepare sgin */
        sg_init_table(sgin, n_sgin);
@@@ -2069,8 -2076,7 +2075,8 @@@ static int tls_read_size(struct strpars
        data_len = ((header[4] & 0xFF) | (header[3] << 8));
  
        cipher_overhead = prot->tag_size;
 -      if (prot->version != TLS_1_3_VERSION)
 +      if (prot->version != TLS_1_3_VERSION &&
 +          prot->cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
                cipher_overhead += prot->iv_size;
  
        if (data_len > TLS_MAX_PAYLOAD_SIZE + cipher_overhead +
@@@ -2290,7 -2296,6 +2296,7 @@@ int tls_set_sw_offload(struct sock *sk
        struct tls12_crypto_info_aes_gcm_128 *gcm_128_info;
        struct tls12_crypto_info_aes_gcm_256 *gcm_256_info;
        struct tls12_crypto_info_aes_ccm_128 *ccm_128_info;
 +      struct tls12_crypto_info_chacha20_poly1305 *chacha20_poly1305_info;
        struct tls_sw_context_tx *sw_ctx_tx = NULL;
        struct tls_sw_context_rx *sw_ctx_rx = NULL;
        struct cipher_context *cctx;
                cipher_name = "ccm(aes)";
                break;
        }
 +      case TLS_CIPHER_CHACHA20_POLY1305: {
 +              chacha20_poly1305_info = (void *)crypto_info;
 +              nonce_size = 0;
 +              tag_size = TLS_CIPHER_CHACHA20_POLY1305_TAG_SIZE;
 +              iv_size = TLS_CIPHER_CHACHA20_POLY1305_IV_SIZE;
 +              iv = chacha20_poly1305_info->iv;
 +              rec_seq_size = TLS_CIPHER_CHACHA20_POLY1305_REC_SEQ_SIZE;
 +              rec_seq = chacha20_poly1305_info->rec_seq;
 +              keysize = TLS_CIPHER_CHACHA20_POLY1305_KEY_SIZE;
 +              key = chacha20_poly1305_info->key;
 +              salt = chacha20_poly1305_info->salt;
 +              salt_size = TLS_CIPHER_CHACHA20_POLY1305_SALT_SIZE;
 +              cipher_name = "rfc7539(chacha20,poly1305)";
 +              break;
 +      }
        default:
                rc = -EINVAL;
                goto free_priv;
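tls_read_size() above adds ChaCha20-Poly1305 to the ciphers that omit the explicit per-record IV. An illustrative helper (not in the patch) capturing that overhead rule:

#include <linux/tls.h>

static unsigned int cipher_overhead(u16 version, u16 cipher_type,
				    unsigned int tag_size,
				    unsigned int iv_size)
{
	/* TLS 1.3 and ChaCha20-Poly1305 carry no explicit IV on the wire */
	if (version != TLS_1_3_VERSION &&
	    cipher_type != TLS_CIPHER_CHACHA20_POLY1305)
		return tag_size + iv_size;
	return tag_size;
}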