Git Repo - linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
author    David S. Miller <[email protected]>
          Tue, 7 Feb 2017 21:29:30 +0000 (16:29 -0500)
committer David S. Miller <[email protected]>
          Tue, 7 Feb 2017 21:29:30 +0000 (16:29 -0500)
The conflict was an interaction between a bug fix in the
netvsc driver in 'net' and an optimization of the RX path
in 'net-next'.

Signed-off-by: David S. Miller <[email protected]>
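
To inspect this resolution locally, a minimal sketch (assuming a clone of
davem's net-next tree; <merge-commit> stands in for this commit's hash,
which this page does not show):

    # list recent merges of 'net' into net-next
    git log --merges --oneline --grep='davem/net'

    # show the combined (conflict-only) diff for the netvsc driver
    git show --cc <merge-commit> -- drivers/net/hyperv/netvsc.c
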
23 files changed:
MAINTAINERS
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/hyperv/netvsc.c
drivers/net/tun.c
drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
drivers/vhost/vhost.c
include/net/sock.h
include/uapi/linux/seg6.h
net/core/dev.c
net/core/ethtool.c
net/ipv4/ip_sockglue.c
net/ipv4/tcp.c
net/ipv4/udp.c
net/ipv6/addrconf.c
net/ipv6/ip6_gre.c
net/ipv6/seg6_hmac.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/mesh.c
net/sctp/socket.c
net/wireless/nl80211.c

diff --combined MAINTAINERS
index a9368bba9b37de1079b9394f9446c9a145d4ab9b,187b9615e31a85d49fcd10ca96f4f20e19272652..93a41ef97f18cb0ac7fcc4a713211c087f3cfc10
  S:    Supported
  F:    drivers/net/ethernet/broadcom/bnx2x/
  
 +BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
 +M:    Michael Chan <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    drivers/net/ethernet/broadcom/bnxt/
 +
  BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
  M:    Florian Fainelli <[email protected]>
  M:    Ray Jui <[email protected]>
@@@ -6250,13 -6244,6 +6250,13 @@@ F:    include/net/cfg802154.
  F:    include/net/ieee802154_netdev.h
  F:    Documentation/networking/ieee802154.txt
  
 +IFE PROTOCOL
 +M:    Yotam Gigi <[email protected]>
 +M:    Jamal Hadi Salim <[email protected]>
 +F:    net/ife
 +F:    include/net/ife.h
 +F:    include/uapi/linux/ife.h
 +
  IGORPLUG-USB IR RECEIVER
  M:    Sean Young <[email protected]>
  L:    [email protected]
@@@ -9382,14 -9369,6 +9382,14 @@@ F:    drivers/video/fbdev/sti
  F:    drivers/video/console/sti*
  F:    drivers/video/logo/logo_parisc*
  
 +PARMAN
 +M:    Jiri Pirko <[email protected]>
 +L:    [email protected]
 +S:    Supported
 +F:    lib/parman.c
 +F:    lib/test_parman.c
 +F:    include/linux/parman.h
 +
  PC87360 HARDWARE MONITORING DRIVER
  M:    Jim Cromie <[email protected]>
  L:    [email protected]
  S:    Maintained
  F:    drivers/block/ps3vram.c
  
 +PSAMPLE PACKET SAMPLING SUPPORT:
 +M:    Yotam Gigi <[email protected]>
 +S:    Maintained
 +F:    net/psample
 +F:    include/net/psample.h
 +F:    include/uapi/linux/psample.h
 +
  PSTORE FILESYSTEM
  M:    Anton Vorontsov <[email protected]>
  M:    Colin Cross <[email protected]>
@@@ -10621,7 -10593,7 +10621,7 @@@ F:   drivers/net/wireless/realtek/rtlwifi
  F:    drivers/net/wireless/realtek/rtlwifi/rtl8192ce/
  
  RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
 -M:    Jes Sorensen <Jes.Sorensen@redhat.com>
 +M:    Jes Sorensen <Jes.Sorensen@gmail.com>
  L:    [email protected]
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jes/linux.git rtl8xxxu-devel
  S:    Maintained
@@@ -10887,13 -10859,6 +10887,13 @@@ S: Maintaine
  F:    drivers/staging/media/st-cec/
  F:    Documentation/devicetree/bindings/media/stih-cec.txt
  
 +SHARED MEMORY COMMUNICATIONS (SMC) SOCKETS
 +M:    Ursula Braun <[email protected]>
 +L:    [email protected]
 +W:    http://www.ibm.com/developerworks/linux/linux390/
 +S:    Supported
 +F:    net/smc/
 +
  SYNOPSYS DESIGNWARE DMAC DRIVER
  M:    Viresh Kumar <[email protected]>
  M:    Andy Shevchenko <[email protected]>
@@@ -10902,6 -10867,13 +10902,6 @@@ F:  include/linux/dma/dw.
  F:    include/linux/platform_data/dma-dw.h
  F:    drivers/dma/dw/
  
 -SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
 -M: Lars Persson <[email protected]>
 -S: Supported
 -F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
 -F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
 -
  SYNOPSYS DESIGNWARE I2C DRIVER
  M:    Jarkko Nikula <[email protected]>
  R:    Andy Shevchenko <[email protected]>
@@@ -11344,13 -11316,6 +11344,13 @@@ F: arch/arm/mach-s3c24xx/mach-bast.
  F:    arch/arm/mach-s3c24xx/bast-ide.c
  F:    arch/arm/mach-s3c24xx/bast-irq.c
  
 +SIPHASH PRF ROUTINES
 +M:    Jason A. Donenfeld <[email protected]>
 +S:    Maintained
 +F:    lib/siphash.c
 +F:    lib/test_siphash.c
 +F:    include/linux/siphash.h
 +
  TI DAVINCI MACHINE SUPPORT
  M:    Sekhar Nori <[email protected]>
  M:    Kevin Hilman <[email protected]>
@@@ -11922,7 -11887,6 +11922,7 @@@ F:   include/linux/swiotlb.
  
  SWITCHDEV
  M:    Jiri Pirko <[email protected]>
 +M:    Ivan Vecera <[email protected]>
  L:    [email protected]
  S:    Supported
  F:    net/switchdev/
@@@ -13101,7 -13065,7 +13101,7 @@@ F:   drivers/input/serio/userio.
  F:    include/uapi/linux/userio.h
  
  VIRTIO CONSOLE DRIVER
- M:    Amit Shah <amit[email protected]>
+ M:    Amit Shah <amit@kernel.org>
  L:    [email protected]
  S:    Maintained
  F:    drivers/char/virtio_console.c
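
The MAINTAINERS entries added above are consumed by the kernel's standard
lookup script; for example, to see who now covers the new siphash and
parman files:

    ./scripts/get_maintainer.pl -f lib/siphash.c
    ./scripts/get_maintainer.pl -f lib/parman.c
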
diff --combined drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ca730d4abbb4bde8b80d635d7e86a0669480e20e,9aa4226919542f6496fedce45a09e09685433efe..c4d714fcc7dae759998a49a1f90f9ab1ee9bdda3
@@@ -902,7 -902,6 +902,7 @@@ mlx4_en_set_link_ksettings(struct net_d
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_ptys_reg ptys_reg;
        __be32 proto_admin;
 +      u8 cur_autoneg;
        int ret;
  
        u32 ptys_adv = ethtool2ptys_link_modes(
                return 0;
        }
  
 -      proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
 -              cpu_to_be32(ptys_adv) :
 -              speed_set_ptys_admin(priv, speed,
 -                                   ptys_reg.eth_proto_cap);
 +      cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ?
 +                              AUTONEG_DISABLE : AUTONEG_ENABLE;
 +
 +      if (link_ksettings->base.autoneg == AUTONEG_DISABLE) {
 +              proto_admin = speed_set_ptys_admin(priv, speed,
 +                                                 ptys_reg.eth_proto_cap);
 +              if ((be32_to_cpu(proto_admin) &
 +                   (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) |
 +                    MLX4_PROT_MASK(MLX4_1000BASE_KX))) &&
 +                  (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP))
 +                      ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN;
 +      } else {
 +              proto_admin = cpu_to_be32(ptys_adv);
 +              ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN;
 +      }
  
        proto_admin &= ptys_reg.eth_proto_cap;
        if (!proto_admin) {
                return -EINVAL; /* nothing to change due to bad input */
        }
  
 -      if (proto_admin == ptys_reg.eth_proto_admin)
 +      if ((proto_admin == ptys_reg.eth_proto_admin) &&
 +          ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) &&
 +           (link_ksettings->base.autoneg == cur_autoneg)))
                return 0; /* Nothing to change */
  
        en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
@@@ -1113,7 -1099,7 +1113,7 @@@ static int mlx4_en_set_ringparam(struc
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        new_prof.tx_ring_size = tx_size;
        new_prof.rx_ring_size = rx_size;
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
  
@@@ -1788,7 -1774,7 +1788,7 @@@ static int mlx4_en_set_channels(struct 
        new_prof.tx_ring_num[TX_XDP] = xdp_count;
        new_prof.rx_ring_num = channel->rx_count;
  
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
  
        netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
        netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
  
 -      if (dev->num_tc)
 +      if (netdev_get_num_tc(dev))
                mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
  
        en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
@@@ -1994,7 -1980,7 +1994,7 @@@ static int mlx4_en_get_module_info(stru
                modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
                break;
        default:
 -              return -ENOSYS;
 +              return -EINVAL;
        }
  
        return 0;
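
The reworked autoneg handling above is exercised from userspace through
ethtool's link-settings interface; a hedged example (device name assumed):

    # force a fixed speed with autonegotiation disabled on an mlx4 port
    ethtool -s eth0 autoneg off speed 1000 duplex full

    # return to autonegotiation with the advertised modes
    ethtool -s eth0 autoneg on
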
diff --combined drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 60a021c34881fecd1e1b3c6104051323e4413232,3b4961a8e8e44d6987ebd23f9239e747c7fc6cd5..748e9f65c386b6e49ce8112cda6f47af6047cba8
@@@ -1321,7 -1321,7 +1321,7 @@@ static void mlx4_en_tx_timeout(struct n
  }
  
  
 -static struct rtnl_link_stats64 *
 +static void
  mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        mlx4_en_fold_software_stats(dev);
        netdev_stats_to_stats64(stats, &dev->stats);
        spin_unlock_bh(&priv->stats_lock);
 -
 -      return stats;
  }
  
  static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@@ -1695,14 -1697,6 +1695,14 @@@ int mlx4_en_start_port(struct net_devic
                       priv->port, err);
                goto tx_err;
        }
 +
 +      err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
 +      if (err) {
 +              en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
 +                     dev->mtu, priv->port, err);
 +              goto tx_err;
 +      }
 +
        /* Set default qp number */
        err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
        if (err) {
@@@ -2048,6 -2042,8 +2048,8 @@@ static void mlx4_en_free_resources(stru
                        if (priv->tx_cq[t] && priv->tx_cq[t][i])
                                mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
                }
+               kfree(priv->tx_ring[t]);
+               kfree(priv->tx_cq[t]);
        }
  
        for (i = 0; i < priv->rx_ring_num; i++) {
@@@ -2190,9 -2186,11 +2192,11 @@@ static void mlx4_en_update_priv(struct 
  
  int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
                                struct mlx4_en_priv *tmp,
-                               struct mlx4_en_port_profile *prof)
+                               struct mlx4_en_port_profile *prof,
+                               bool carry_xdp_prog)
  {
-       int t;
+       struct bpf_prog *xdp_prog;
+       int i, t;
  
        mlx4_en_copy_priv(tmp, priv, prof);
  
                }
                return -ENOMEM;
        }
+       /* All rx_rings has the same xdp_prog.  Pick the first one. */
+       xdp_prog = rcu_dereference_protected(
+               priv->rx_ring[0]->xdp_prog,
+               lockdep_is_held(&priv->mdev->state_lock));
+       if (xdp_prog && carry_xdp_prog) {
+               xdp_prog = bpf_prog_add(xdp_prog, tmp->rx_ring_num);
+               if (IS_ERR(xdp_prog)) {
+                       mlx4_en_free_resources(tmp);
+                       return PTR_ERR(xdp_prog);
+               }
+               for (i = 0; i < tmp->rx_ring_num; i++)
+                       rcu_assign_pointer(tmp->rx_ring[i]->xdp_prog,
+                                          xdp_prog);
+       }
        return 0;
  }
  
@@@ -2220,7 -2235,6 +2241,6 @@@ void mlx4_en_destroy_netdev(struct net_
  {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
-       int t;
  
        en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
  
        mlx4_en_free_resources(priv);
        mutex_unlock(&mdev->state_lock);
  
-       for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
-               kfree(priv->tx_ring[t]);
-               kfree(priv->tx_cq[t]);
-       }
        free_netdev(dev);
  }
  
@@@ -2761,7 -2770,7 +2776,7 @@@ static int mlx4_xdp_set(struct net_devi
                en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
        }
  
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, false);
        if (err) {
                if (prog)
                        bpf_prog_sub(prog, priv->rx_ring_num - 1);
@@@ -3505,7 -3514,7 +3520,7 @@@ int mlx4_en_reset_config(struct net_dev
        memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
        memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
  
-       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+       err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
        if (err)
                goto out;
  
diff --combined drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f15ddba3659aac38471059c6bcbf050717946c4e,cc003fdf0ed929a981b1403f6a7d0099825fec4b..d85e6446f9d99e38c75b97d7fba29bd057e0a16f
@@@ -33,7 -33,6 +33,7 @@@
  
  #include <net/busy_poll.h>
  #include <linux/bpf.h>
 +#include <linux/bpf_trace.h>
  #include <linux/mlx4/cq.h>
  #include <linux/slab.h>
  #include <linux/mlx4/qp.h>
@@@ -515,8 -514,11 +515,11 @@@ void mlx4_en_recover_from_oom(struct ml
                return;
  
        for (ring = 0; ring < priv->rx_ring_num; ring++) {
-               if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+               if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
+                       local_bh_disable();
                        napi_reschedule(&priv->rx_cq[ring]->napi);
+                       local_bh_enable();
+               }
        }
  }
  
@@@ -707,8 -709,7 +710,8 @@@ static bool mlx4_en_refill_rx_buffers(s
        do {
                if (mlx4_en_prepare_rx_desc(priv, ring,
                                            ring->prod & ring->size_mask,
 -                                          GFP_ATOMIC | __GFP_COLD))
 +                                          GFP_ATOMIC | __GFP_COLD |
 +                                          __GFP_MEMALLOC))
                        break;
                ring->prod++;
        } while (--missing);
@@@ -927,12 -928,10 +930,12 @@@ int mlx4_en_process_rx_cq(struct net_de
                                                        length, cq->ring,
                                                        &doorbell_pending)))
                                        goto consumed;
 +                              trace_xdp_exception(dev, xdp_prog, act);
                                goto xdp_drop_no_cnt; /* Drop on xmit failure */
                        default:
                                bpf_warn_invalid_xdp_action(act);
                        case XDP_ABORTED:
 +                              trace_xdp_exception(dev, xdp_prog, act);
                        case XDP_DROP:
                                ring->xdp_drop++;
  xdp_drop_no_cnt:
diff --combined drivers/net/hyperv/netvsc.c
index 5cfdb1a1b4c17c793c4b5e605b38919c6840904f,86e5749226ef4cf65d6070bca1ab0d4be35bf2e0..fd6ebbefd919344e0c7dab94316488a43bc3e2bc
@@@ -67,8 -67,14 +67,8 @@@ static struct netvsc_device *alloc_net_
        if (!net_device)
                return NULL;
  
 -      net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
 -      if (!net_device->cb_buffer) {
 -              kfree(net_device);
 -              return NULL;
 -      }
 -
 -      net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
 -                                       sizeof(struct recv_comp_data));
 +      net_device->chan_table[0].mrc.buf
 +              = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
  
        init_waitqueue_head(&net_device->wait_drain);
        net_device->destroy = false;
@@@ -85,28 -91,35 +85,28 @@@ static void free_netvsc_device(struct n
        int i;
  
        for (i = 0; i < VRSS_CHANNEL_MAX; i++)
 -              vfree(nvdev->mrc[i].buf);
 +              vfree(nvdev->chan_table[i].mrc.buf);
  
 -      kfree(nvdev->cb_buffer);
        kfree(nvdev);
  }
  
 -static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
 -{
 -      struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
  
 -      if (net_device && net_device->destroy)
 -              net_device = NULL;
 +static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
 +                                     u16 q_idx)
 +{
 +      const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
  
 -      return net_device;
 +      return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
 +              atomic_read(&nvchan->queue_sends) == 0;
  }
  
 -static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
 +static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
  {
        struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
  
 -      if (!net_device)
 -              goto get_in_err;
 -
 -      if (net_device->destroy &&
 -          atomic_read(&net_device->num_outstanding_sends) == 0 &&
 -          atomic_read(&net_device->num_outstanding_recvs) == 0)
 +      if (net_device && net_device->destroy)
                net_device = NULL;
  
 -get_in_err:
        return net_device;
  }
  
@@@ -571,6 -584,7 +571,6 @@@ void netvsc_device_remove(struct hv_dev
        vmbus_close(device->channel);
  
        /* Release all resources */
 -      vfree(net_device->sub_cb_buf);
        free_netvsc_device(net_device);
  }
  
@@@ -606,35 -620,29 +606,35 @@@ static void netvsc_send_tx_complete(str
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
        struct vmbus_channel *channel = device->channel;
 -      int num_outstanding_sends;
        u16 q_idx = 0;
        int queue_sends;
  
        /* Notify the layer above us */
        if (likely(skb)) {
 -              struct hv_netvsc_packet *nvsc_packet
 +              const struct hv_netvsc_packet *packet
                        = (struct hv_netvsc_packet *)skb->cb;
 -              u32 send_index = nvsc_packet->send_buf_index;
 +              u32 send_index = packet->send_buf_index;
 +              struct netvsc_stats *tx_stats;
  
                if (send_index != NETVSC_INVALID_INDEX)
                        netvsc_free_send_slot(net_device, send_index);
 -              q_idx = nvsc_packet->q_idx;
 +              q_idx = packet->q_idx;
                channel = incoming_channel;
  
 +              tx_stats = &net_device->chan_table[q_idx].tx_stats;
 +
 +              u64_stats_update_begin(&tx_stats->syncp);
 +              tx_stats->packets += packet->total_packets;
 +              tx_stats->bytes += packet->total_bytes;
 +              u64_stats_update_end(&tx_stats->syncp);
 +
                dev_consume_skb_any(skb);
        }
  
 -      num_outstanding_sends =
 -              atomic_dec_return(&net_device->num_outstanding_sends);
 -      queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);
 +      queue_sends =
 +              atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
  
 -      if (net_device->destroy && num_outstanding_sends == 0)
 +      if (net_device->destroy && queue_sends == 0)
                wake_up(&net_device->wait_drain);
  
        if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
@@@ -680,15 -688,27 +680,15 @@@ static void netvsc_send_completion(stru
  
  static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
  {
 -      unsigned long index;
 -      u32 max_words = net_device->map_words;
 -      unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
 -      u32 section_cnt = net_device->send_section_cnt;
 -      int ret_val = NETVSC_INVALID_INDEX;
 -      int i;
 -      int prev_val;
 -
 -      for (i = 0; i < max_words; i++) {
 -              if (!~(map_addr[i]))
 -                      continue;
 -              index = ffz(map_addr[i]);
 -              prev_val = sync_test_and_set_bit(index, &map_addr[i]);
 -              if (prev_val)
 -                      continue;
 -              if ((index + (i * BITS_PER_LONG)) >= section_cnt)
 -                      break;
 -              ret_val = (index + (i * BITS_PER_LONG));
 -              break;
 +      unsigned long *map_addr = net_device->send_section_map;
 +      unsigned int i;
 +
 +      for_each_clear_bit(i, map_addr, net_device->map_words) {
 +              if (sync_test_and_set_bit(i, map_addr) == 0)
 +                      return i;
        }
 -      return ret_val;
 +
 +      return NETVSC_INVALID_INDEX;
  }
  
  static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
@@@ -745,11 -765,9 +745,11 @@@ static inline int netvsc_send_pkt
        struct sk_buff *skb)
  {
        struct nvsp_message nvmsg;
 -      u16 q_idx = packet->q_idx;
 -      struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
 +      struct netvsc_channel *nvchan
 +              = &net_device->chan_table[packet->q_idx];
 +      struct vmbus_channel *out_channel = nvchan->channel;
        struct net_device *ndev = hv_get_drvdata(device);
 +      struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
        u64 req_id;
        int ret;
        struct hv_page_buffer *pgbuf;
        }
  
        if (ret == 0) {
 -              atomic_inc(&net_device->num_outstanding_sends);
 -              atomic_inc(&net_device->queue_sends[q_idx]);
 -
 -              if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
 -                      netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
 +              atomic_inc_return(&nvchan->queue_sends);
  
 -                      if (atomic_read(&net_device->
 -                              queue_sends[q_idx]) < 1)
 -                              netif_tx_wake_queue(netdev_get_tx_queue(
 -                                                  ndev, q_idx));
 -              }
 +              if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
 +                      netif_tx_stop_queue(txq);
        } else if (ret == -EAGAIN) {
 -              netif_tx_stop_queue(netdev_get_tx_queue(
 -                                  ndev, q_idx));
 -              if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
 -                      netif_tx_wake_queue(netdev_get_tx_queue(
 -                                          ndev, q_idx));
 +              netif_tx_stop_queue(txq);
 +              if (atomic_read(&nvchan->queue_sends) < 1) {
 +                      netif_tx_wake_queue(txq);
                        ret = -ENOSPC;
                }
        } else {
@@@ -847,7 -874,8 +847,7 @@@ int netvsc_send(struct hv_device *devic
  {
        struct netvsc_device *net_device;
        int ret = 0;
 -      struct vmbus_channel *out_channel;
 -      u16 q_idx = packet->q_idx;
 +      struct netvsc_channel *nvchan;
        u32 pktlen = packet->total_data_buflen, msd_len = 0;
        unsigned int section_index = NETVSC_INVALID_INDEX;
        struct multi_send_data *msdp;
        if (!net_device->send_section_map)
                return -EAGAIN;
  
 -      out_channel = net_device->chn_table[q_idx];
 -
 +      nvchan = &net_device->chan_table[packet->q_idx];
        packet->send_buf_index = NETVSC_INVALID_INDEX;
        packet->cp_partial = false;
  
                goto send_now;
        }
  
 -      msdp = &net_device->msd[q_idx];
 -
        /* batch packets in send buffer if possible */
 +      msdp = &nvchan->msd;
        if (msdp->pkt)
                msd_len = msdp->pkt->total_data_buflen;
  
                        packet->total_data_buflen += msd_len;
                }
  
 +              if (msdp->pkt) {
 +                      packet->total_packets += msdp->pkt->total_packets;
 +                      packet->total_bytes += msdp->pkt->total_bytes;
 +              }
 +
                if (msdp->skb)
                        dev_consume_skb_any(msdp->skb);
  
@@@ -986,9 -1011,8 +986,9 @@@ static int netvsc_send_recv_completion(
  static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
                                        u32 *filled, u32 *avail)
  {
 -      u32 first = nvdev->mrc[q_idx].first;
 -      u32 next = nvdev->mrc[q_idx].next;
 +      struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
 +      u32 first = mrc->first;
 +      u32 next = mrc->next;
  
        *filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
                  next - first;
  static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
                                                         *nvdev, u16 q_idx)
  {
 +      struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        u32 filled, avail;
  
 -      if (!nvdev->mrc[q_idx].buf)
 +      if (unlikely(!mrc->buf))
                return NULL;
  
        count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
        if (!filled)
                return NULL;
  
 -      return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first *
 -             sizeof(struct recv_comp_data);
 +      return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
  }
  
  /* Put the first filled slot back to available pool */
  static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
  {
 +      struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        int num_recv;
  
 -      nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) %
 -                                NETVSC_RECVSLOT_MAX;
 +      mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;
  
        num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);
  
@@@ -1054,14 -1078,13 +1054,14 @@@ static void netvsc_chk_recv_comp(struc
  static inline struct recv_comp_data *get_recv_comp_slot(
        struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
  {
 +      struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
        u32 filled, avail, next;
        struct recv_comp_data *rcd;
  
 -      if (!nvdev->recv_section)
 +      if (unlikely(!nvdev->recv_section))
                return NULL;
  
 -      if (!nvdev->mrc[q_idx].buf)
 +      if (unlikely(!mrc->buf))
                return NULL;
  
        if (atomic_read(&nvdev->num_outstanding_recvs) >
        if (!avail)
                return NULL;
  
 -      next = nvdev->mrc[q_idx].next;
 -      rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data);
 -      nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX;
 +      next = mrc->next;
 +      rcd = mrc->buf + next * sizeof(struct recv_comp_data);
 +      mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;
  
        atomic_inc(&nvdev->num_outstanding_recvs);
  
        return rcd;
  }
  
 -static void netvsc_receive(struct netvsc_device *net_device,
 -                      struct vmbus_channel *channel,
 -                      struct hv_device *device,
 -                      struct vmpacket_descriptor *packet)
 +static void netvsc_receive(struct net_device *ndev,
 +                 struct netvsc_device *net_device,
 +                 struct net_device_context *net_device_ctx,
 +                 struct hv_device *device,
 +                 struct vmbus_channel *channel,
 +                 struct vmtransfer_page_packet_header *vmxferpage_packet,
 +                 struct nvsp_message *nvsp)
  {
 -      struct vmtransfer_page_packet_header *vmxferpage_packet;
 -      struct nvsp_message *nvsp_packet;
 -      struct hv_netvsc_packet nv_pkt;
 -      struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
 +      char *recv_buf = net_device->recv_buf;
        u32 status = NVSP_STAT_SUCCESS;
        int i;
        int count = 0;
 -      struct net_device *ndev = hv_get_drvdata(device);
 -      void *data;
        int ret;
        struct recv_comp_data *rcd;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
  
 -      /*
 -       * All inbound packets other than send completion should be xfer page
 -       * packet
 -       */
 -      if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
 -              netdev_err(ndev, "Unknown packet type received - %d\n",
 -                         packet->type);
 -              return;
 -      }
 -
 -      nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
 -                      (packet->offset8 << 3));
 -
        /* Make sure this is a valid nvsp packet */
 -      if (nvsp_packet->hdr.msg_type !=
 -          NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
 -              netdev_err(ndev, "Unknown nvsp packet type received-"
 -                      " %d\n", nvsp_packet->hdr.msg_type);
 +      if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
 +              netif_err(net_device_ctx, rx_err, ndev,
 +                        "Unknown nvsp packet type received %u\n",
 +                        nvsp->hdr.msg_type);
                return;
        }
  
 -      vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
 -
 -      if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
 -              netdev_err(ndev, "Invalid xfer page set id - "
 -                         "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
 -                         vmxferpage_packet->xfer_pageset_id);
 +      if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
 +              netif_err(net_device_ctx, rx_err, ndev,
 +                        "Invalid xfer page set id - expecting %x got %x\n",
 +                        NETVSC_RECEIVE_BUFFER_ID,
 +                        vmxferpage_packet->xfer_pageset_id);
                return;
        }
  
  
        /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
        for (i = 0; i < count; i++) {
 -              /* Initialize the netvsc packet */
 -              data = (void *)((unsigned long)net_device->
 -                      recv_buf + vmxferpage_packet->ranges[i].byte_offset);
 -              netvsc_packet->total_data_buflen =
 -                                      vmxferpage_packet->ranges[i].byte_count;
 +              void *data = recv_buf
 +                      + vmxferpage_packet->ranges[i].byte_offset;
 +              u32 buflen = vmxferpage_packet->ranges[i].byte_count;
  
                /* Pass it to the upper layer */
 -              status = rndis_filter_receive(device, netvsc_packet, &data,
 -                                            channel);
 +              status = rndis_filter_receive(ndev, net_device, device,
 +                                            channel, data, buflen);
        }
  
 -      if (!net_device->mrc[q_idx].buf) {
 +      if (!net_device->chan_table[q_idx].mrc.buf) {
                ret = netvsc_send_recv_completion(channel,
                                                  vmxferpage_packet->d.trans_id,
                                                  status);
@@@ -1202,10 -1243,11 +1202,10 @@@ static void netvsc_process_raw_pkt(stru
                                   u64 request_id,
                                   struct vmpacket_descriptor *desc)
  {
 -      struct nvsp_message *nvmsg;
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
 -
 -      nvmsg = (struct nvsp_message *)((unsigned long)
 -              desc + (desc->offset8 << 3));
 +      struct nvsp_message *nvmsg
 +              = (struct nvsp_message *)((unsigned long)desc
 +                                        + (desc->offset8 << 3));
  
        switch (desc->type) {
        case VM_PKT_COMP:
                break;
  
        case VM_PKT_DATA_USING_XFER_PAGES:
 -              netvsc_receive(net_device, channel, device, desc);
 +              netvsc_receive(ndev, net_device, net_device_ctx,
 +                             device, channel,
 +                             (struct vmtransfer_page_packet_header *)desc,
 +                             nvmsg);
                break;
  
        case VM_PKT_DATA_INBAND:
  
  void netvsc_channel_cb(void *context)
  {
 -      int ret;
 -      struct vmbus_channel *channel = (struct vmbus_channel *)context;
 +      struct vmbus_channel *channel = context;
        u16 q_idx = channel->offermsg.offer.sub_channel_index;
        struct hv_device *device;
        struct netvsc_device *net_device;
 -      u32 bytes_recvd;
 -      u64 request_id;
        struct vmpacket_descriptor *desc;
 -      unsigned char *buffer;
 -      int bufferlen = NETVSC_PACKET_SIZE;
        struct net_device *ndev;
        bool need_to_commit = false;
  
        else
                device = channel->device_obj;
  
 -      net_device = get_inbound_net_device(device);
 -      if (!net_device)
 -              return;
        ndev = hv_get_drvdata(device);
 -      buffer = get_per_channel_state(channel);
 +      if (unlikely(!ndev))
 +              return;
 +
 +      net_device = net_device_to_netvsc_device(ndev);
 +      if (unlikely(net_device->destroy) &&
 +          netvsc_channel_idle(net_device, q_idx))
 +              return;
  
 -      do {
 -              desc = get_next_pkt_raw(channel);
 -              if (desc != NULL) {
 -                      netvsc_process_raw_pkt(device,
 -                                             channel,
 -                                             net_device,
 -                                             ndev,
 -                                             desc->trans_id,
 -                                             desc);
 -
 -                      put_pkt_raw(channel, desc);
 -                      need_to_commit = true;
 -                      continue;
 -              }
 -              if (need_to_commit) {
 -                      need_to_commit = false;
 -                      commit_rd_index(channel);
 -              }
 -
 -              ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
 -                                         &bytes_recvd, &request_id);
 -              if (ret == 0) {
 -                      if (bytes_recvd > 0) {
 -                              desc = (struct vmpacket_descriptor *)buffer;
 -                              netvsc_process_raw_pkt(device,
 -                                                     channel,
 -                                                     net_device,
 -                                                     ndev,
 -                                                     request_id,
 -                                                     desc);
 -                      } else {
 -                              /*
 -                               * We are done for this pass.
 -                               */
 -                              break;
 -                      }
 -
 -              } else if (ret == -ENOBUFS) {
 -                      if (bufferlen > NETVSC_PACKET_SIZE)
 -                              kfree(buffer);
 -                      /* Handle large packet */
 -                      buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
 -                      if (buffer == NULL) {
 -                              /* Try again next time around */
 -                              netdev_err(ndev,
 -                                         "unable to allocate buffer of size "
 -                                         "(%d)!!\n", bytes_recvd);
 -                              break;
 -                      }
 -
 -                      bufferlen = bytes_recvd;
 -              }
 -
 -              init_cached_read_index(channel);
+       /* commit_rd_index() -> hv_signal_on_read() needs this. */
+       init_cached_read_index(channel);
 +      while ((desc = get_next_pkt_raw(channel)) != NULL) {
 +              netvsc_process_raw_pkt(device, channel, net_device,
 +                                     ndev, desc->trans_id, desc);
  
 -      } while (1);
 +              put_pkt_raw(channel, desc);
 +              need_to_commit = true;
 +      }
  
 -      if (bufferlen > NETVSC_PACKET_SIZE)
 -              kfree(buffer);
 +      if (need_to_commit)
 +              commit_rd_index(channel);
  
        netvsc_chk_recv_comp(net_device, channel, q_idx);
  }
   * netvsc_device_add - Callback when the device belonging to this
   * driver is added
   */
 -int netvsc_device_add(struct hv_device *device, void *additional_info)
 +int netvsc_device_add(struct hv_device *device,
 +                    const struct netvsc_device_info *device_info)
  {
        int i, ret = 0;
 -      int ring_size =
 -      ((struct netvsc_device_info *)additional_info)->ring_size;
 +      int ring_size = device_info->ring_size;
        struct netvsc_device *net_device;
        struct net_device *ndev = hv_get_drvdata(device);
        struct net_device_context *net_device_ctx = netdev_priv(ndev);
  
        net_device->ring_size = ring_size;
  
 -      set_per_channel_state(device->channel, net_device->cb_buffer);
 -
        /* Open the channel */
        ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
                         ring_size * PAGE_SIZE, NULL, 0,
         * opened.
         */
        for (i = 0; i < VRSS_CHANNEL_MAX; i++)
 -              net_device->chn_table[i] = device->channel;
 +              net_device->chan_table[i].channel = device->channel;
  
        /* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
         * populated.
diff --combined drivers/net/tun.c
index 8a7d6b905362389ace72731c2a8e1f163c298c47,bfabe180053e414dee777e0e56b24eceef05c918..30863e378925b3555dea6eadb99a02678779cfd5
@@@ -218,7 -218,6 +218,7 @@@ struct tun_struct 
        struct list_head disabled;
        void *security;
        u32 flow_count;
 +      u32 rx_batched;
        struct tun_pcpu_stats __percpu *pcpu_stats;
  };
  
@@@ -523,7 -522,6 +523,7 @@@ static void tun_queue_purge(struct tun_
        while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
                kfree_skb(skb);
  
 +      skb_queue_purge(&tfile->sk.sk_write_queue);
        skb_queue_purge(&tfile->sk.sk_error_queue);
  }
  
@@@ -955,7 -953,7 +955,7 @@@ static void tun_set_headroom(struct net
        tun->align = new_hr;
  }
  
 -static struct rtnl_link_stats64 *
 +static void
  tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  {
        u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
        stats->rx_dropped  = rx_dropped;
        stats->rx_frame_errors = rx_frame_errors;
        stats->tx_dropped = tx_dropped;
 -      return stats;
  }
  
  static const struct net_device_ops tun_netdev_ops = {
@@@ -1141,46 -1140,10 +1141,46 @@@ static struct sk_buff *tun_alloc_skb(st
        return skb;
  }
  
 +static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
 +                         struct sk_buff *skb, int more)
 +{
 +      struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
 +      struct sk_buff_head process_queue;
 +      u32 rx_batched = tun->rx_batched;
 +      bool rcv = false;
 +
 +      if (!rx_batched || (!more && skb_queue_empty(queue))) {
 +              local_bh_disable();
 +              netif_receive_skb(skb);
 +              local_bh_enable();
 +              return;
 +      }
 +
 +      spin_lock(&queue->lock);
 +      if (!more || skb_queue_len(queue) == rx_batched) {
 +              __skb_queue_head_init(&process_queue);
 +              skb_queue_splice_tail_init(queue, &process_queue);
 +              rcv = true;
 +      } else {
 +              __skb_queue_tail(queue, skb);
 +      }
 +      spin_unlock(&queue->lock);
 +
 +      if (rcv) {
 +              struct sk_buff *nskb;
 +
 +              local_bh_disable();
 +              while ((nskb = __skb_dequeue(&process_queue)))
 +                      netif_receive_skb(nskb);
 +              netif_receive_skb(skb);
 +              local_bh_enable();
 +      }
 +}
 +
  /* Get packet from user space buffer */
  static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
                            void *msg_control, struct iov_iter *from,
 -                          int noblock)
 +                          int noblock, bool more)
  {
        struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
        struct sk_buff *skb;
        }
  
        if (tun->flags & IFF_VNET_HDR) {
-               if (len < tun->vnet_hdr_sz)
+               int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
+               if (len < vnet_hdr_sz)
                        return -EINVAL;
-               len -= tun->vnet_hdr_sz;
+               len -= vnet_hdr_sz;
  
                if (!copy_from_iter_full(&gso, sizeof(gso), from))
                        return -EFAULT;
  
                if (tun16_to_cpu(tun, gso.hdr_len) > len)
                        return -EINVAL;
-               iov_iter_advance(from, tun->vnet_hdr_sz - sizeof(gso));
+               iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
        }
  
        if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
  
        rxhash = skb_get_hash(skb);
  #ifndef CONFIG_4KSTACKS
 -      local_bh_disable();
 -      netif_receive_skb(skb);
 -      local_bh_enable();
 +      tun_rx_batched(tun, tfile, skb, more);
  #else
        netif_rx_ni(skb);
  #endif
@@@ -1347,8 -1314,7 +1349,8 @@@ static ssize_t tun_chr_write_iter(struc
        if (!tun)
                return -EBADFD;
  
 -      result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
 +      result = tun_get_user(tun, tfile, NULL, from,
 +                            file->f_flags & O_NONBLOCK, false);
  
        tun_put(tun);
        return result;
@@@ -1371,7 -1337,7 +1373,7 @@@ static ssize_t tun_put_user(struct tun_
                vlan_hlen = VLAN_HLEN;
  
        if (tun->flags & IFF_VNET_HDR)
-               vnet_hdr_sz = tun->vnet_hdr_sz;
+               vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
  
        total = skb->len + vlan_hlen + vnet_hdr_sz;
  
@@@ -1606,8 -1572,7 +1608,8 @@@ static int tun_sendmsg(struct socket *s
                return -EBADFD;
  
        ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
 -                         m->msg_flags & MSG_DONTWAIT);
 +                         m->msg_flags & MSG_DONTWAIT,
 +                         m->msg_flags & MSG_MORE);
        tun_put(tun);
        return ret;
  }
@@@ -1808,7 -1773,6 +1810,7 @@@ static int tun_set_iff(struct net *net
                tun->align = NET_SKB_PAD;
                tun->filter_attached = false;
                tun->sndbuf = tfile->socket.sk->sk_sndbuf;
 +              tun->rx_batched = 0;
  
                tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
                if (!tun->pcpu_stats) {
@@@ -2477,29 -2441,6 +2479,29 @@@ static void tun_set_msglevel(struct net
  #endif
  }
  
 +static int tun_get_coalesce(struct net_device *dev,
 +                          struct ethtool_coalesce *ec)
 +{
 +      struct tun_struct *tun = netdev_priv(dev);
 +
 +      ec->rx_max_coalesced_frames = tun->rx_batched;
 +
 +      return 0;
 +}
 +
 +static int tun_set_coalesce(struct net_device *dev,
 +                          struct ethtool_coalesce *ec)
 +{
 +      struct tun_struct *tun = netdev_priv(dev);
 +
 +      if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
 +              tun->rx_batched = NAPI_POLL_WEIGHT;
 +      else
 +              tun->rx_batched = ec->rx_max_coalesced_frames;
 +
 +      return 0;
 +}
 +
  static const struct ethtool_ops tun_ethtool_ops = {
        .get_settings   = tun_get_settings,
        .get_drvinfo    = tun_get_drvinfo,
        .set_msglevel   = tun_set_msglevel,
        .get_link       = ethtool_op_get_link,
        .get_ts_info    = ethtool_op_get_ts_info,
 +      .get_coalesce   = tun_get_coalesce,
 +      .set_coalesce   = tun_set_coalesce,
  };
  
  static int tun_queue_resize(struct tun_struct *tun)
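
The rx_batched field and coalesce hooks added above let userspace tune the
new tun receive batching; a sketch (interface name assumed; per the code
above, the driver caps rx-frames at NAPI_POLL_WEIGHT, i.e. 64):

    # batch up to 32 packets before handing them to the stack
    ethtool -C tap0 rx-frames 32

    # read the current setting back
    ethtool -c tap0
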
diff --combined drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
index efedd918d10ad13fdd08b4b8b70bd418d3275078,a33a06d58a9ae8496e9baa2cd7a6e2ac1f3087e2..bcbb0c60f1f12bc9d1cd706510d5a97dbc5e3ae3
@@@ -92,7 -92,7 +92,7 @@@ int rtl92c_init_sw_vars(struct ieee8021
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
        struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
-       char *fw_name = "rtlwifi/rtl8192cfwU.bin";
+       char *fw_name;
  
        rtl8192ce_bt_reg_init(hw);
  
  
        rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0);
  
 -      /* for debug level */
 -      rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
        /* for LPS & IPS */
        rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
        rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
        /* for firmware buf */
        rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
        if (!rtlpriv->rtlhal.pfirmware) {
 -              RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 -                       "Can't alloc buffer for fw\n");
 +              pr_err("Can't alloc buffer for fw\n");
                return 1;
        }
  
        /* request fw */
-       if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
+       if (IS_VENDOR_UMC_A_CUT(rtlhal->version) &&
+           !IS_92C_SERIAL(rtlhal->version))
+               fw_name = "rtlwifi/rtl8192cfwU.bin";
+       else if (IS_81XXC_VENDOR_UMC_B_CUT(rtlhal->version))
                fw_name = "rtlwifi/rtl8192cfwU_B.bin";
+       else
+               fw_name = "rtlwifi/rtl8192cfw.bin";
  
        rtlpriv->max_fw_size = 0x4000;
        pr_info("Using firmware %s\n", fw_name);
                                      rtlpriv->io.dev, GFP_KERNEL, hw,
                                      rtl_fw_cb);
        if (err) {
 -              RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 -                       "Failed to request firmware!\n");
 +              pr_err("Failed to request firmware!\n");
                return 1;
        }
  
@@@ -245,8 -254,7 +250,8 @@@ static struct rtl_mod_params rtl92ce_mo
        .inactiveps = true,
        .swctrl_lps = false,
        .fwctrl_lps = true,
 -      .debug = DBG_EMERG,
 +      .debug_level = 0,
 +      .debug_mask = 0,
  };
  
  static const struct rtl_hal_cfg rtl92ce_hal_cfg = {
@@@ -363,8 -371,7 +368,8 @@@ MODULE_FIRMWARE("rtlwifi/rtl8192cfwU.bi
  MODULE_FIRMWARE("rtlwifi/rtl8192cfwU_B.bin");
  
  module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444);
 -module_param_named(debug, rtl92ce_mod_params.debug, int, 0444);
 +module_param_named(debug_level, rtl92ce_mod_params.debug_level, int, 0644);
 +module_param_named(debug_mask, rtl92ce_mod_params.debug_mask, ullong, 0644);
  module_param_named(ips, rtl92ce_mod_params.inactiveps, bool, 0444);
  module_param_named(swlps, rtl92ce_mod_params.swctrl_lps, bool, 0444);
  module_param_named(fwlps, rtl92ce_mod_params.fwctrl_lps, bool, 0444);
@@@ -372,8 -379,7 +377,8 @@@ MODULE_PARM_DESC(swenc, "Set to 1 for s
  MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
  MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
  MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
 -MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
 +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)");
 +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)");
  
  static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
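
With the module parameters renamed above, the single 'debug' knob becomes a
level plus a mask; a hedged example of loading the driver verbosely (values
assumed):

    modprobe rtl8192ce debug_level=5 debug_mask=0xffffffff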
  
diff --combined drivers/vhost/vhost.c
index 9f118388a5b7cc6f70b727f8bafdfadf9a401ac2,8f99fe08de02e7b48725a99d682055c03056b82a..4269e621e254ab7acc38c81f2aafe1775e9dbd81
@@@ -130,14 -130,14 +130,14 @@@ static long vhost_get_vring_endian(stru
  
  static void vhost_init_is_le(struct vhost_virtqueue *vq)
  {
-       if (vhost_has_feature(vq, VIRTIO_F_VERSION_1))
-               vq->is_le = true;
+       vq->is_le = vhost_has_feature(vq, VIRTIO_F_VERSION_1)
+               || virtio_legacy_is_little_endian();
  }
  #endif /* CONFIG_VHOST_CROSS_ENDIAN_LEGACY */
  
  static void vhost_reset_is_le(struct vhost_virtqueue *vq)
  {
-       vq->is_le = virtio_legacy_is_little_endian();
+       vhost_init_is_le(vq);
  }
  
  struct vhost_flush_struct {
@@@ -1714,10 -1714,8 +1714,8 @@@ int vhost_vq_init_access(struct vhost_v
        int r;
        bool is_le = vq->is_le;
  
-       if (!vq->private_data) {
-               vhost_reset_is_le(vq);
+       if (!vq->private_data)
                return 0;
-       }
  
        vhost_init_is_le(vq);
  
@@@ -2241,15 -2239,11 +2239,15 @@@ bool vhost_vq_avail_empty(struct vhost_
        __virtio16 avail_idx;
        int r;
  
 +      if (vq->avail_idx != vq->last_avail_idx)
 +              return false;
 +
        r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
 -      if (r)
 +      if (unlikely(r))
                return false;
 +      vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
  
 -      return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
 +      return vq->avail_idx == vq->last_avail_idx;
  }
  EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
  
diff --combined include/net/sock.h
index 6f83e78eaa5a8928b34ecc63d49b1baf19722c10,c4f5e6fca17cf4e0029080410202cb66ce0fad37..9ccefa5c5487863e69831c3bf45e4f0edf78eb65
@@@ -70,7 -70,6 +70,7 @@@
  #include <net/checksum.h>
  #include <net/tcp_states.h>
  #include <linux/net_tstamp.h>
 +#include <net/smc.h>
  
  /*
   * This structure really needs to be cleaned up.
@@@ -240,7 -239,6 +240,7 @@@ struct sock_common 
    *   @sk_wq: sock wait queue and async head
    *   @sk_rx_dst: receive input route used by early demux
    *   @sk_dst_cache: destination cache
 +  *   @sk_dst_pending_confirm: need to confirm neighbour
    *   @sk_policy: flow policy
    *   @sk_receive_queue: incoming packets
    *   @sk_wmem_alloc: transmit queue bytes committed
@@@ -394,8 -392,6 +394,8 @@@ struct sock 
        struct sk_buff_head     sk_write_queue;
        __s32                   sk_peek_off;
        int                     sk_write_pending;
 +      __u32                   sk_dst_pending_confirm;
 +      /* Note: 32bit hole on 64bit arches */
        long                    sk_sndtimeo;
        struct timer_list       sk_timer;
        __u32                   sk_priority;
@@@ -547,7 -543,8 +547,7 @@@ static inline struct sock *sk_nulls_hea
  
  static inline struct sock *sk_next(const struct sock *sk)
  {
 -      return sk->sk_node.next ?
 -              hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
 +      return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
  }
  
  static inline struct sock *sk_nulls_next(const struct sock *sk)
@@@ -989,7 -986,6 +989,7 @@@ struct request_sock_ops
  struct timewait_sock_ops;
  struct inet_hashinfo;
  struct raw_hashinfo;
 +struct smc_hashinfo;
  struct module;
  
  /*
@@@ -1028,7 -1024,6 +1028,7 @@@ struct proto 
        int                     (*getsockopt)(struct sock *sk, int level,
                                        int optname, char __user *optval,
                                        int __user *option);
 +      void                    (*keepalive)(struct sock *sk, int valbool);
  #ifdef CONFIG_COMPAT
        int                     (*compat_setsockopt)(struct sock *sk,
                                        int level,
                struct inet_hashinfo    *hashinfo;
                struct udp_table        *udp_table;
                struct raw_hashinfo     *raw_hash;
 +              struct smc_hashinfo     *smc_hash;
        } h;
  
        struct module           *owner;
@@@ -1537,7 -1531,7 +1537,7 @@@ void sock_efree(struct sk_buff *skb)
  #ifdef CONFIG_INET
  void sock_edemux(struct sk_buff *skb);
  #else
 -#define sock_edemux(skb) sock_efree(skb)
 +#define sock_edemux sock_efree
  #endif
  
  int sock_setsockopt(struct socket *sock, int level, int op,
@@@ -1767,7 -1761,6 +1767,7 @@@ static inline void dst_negative_advice(
                if (ndst != dst) {
                        rcu_assign_pointer(sk->sk_dst_cache, ndst);
                        sk_tx_queue_clear(sk);
 +                      sk->sk_dst_pending_confirm = 0;
                }
        }
  }
@@@ -1778,7 -1771,6 +1778,7 @@@ __sk_dst_set(struct sock *sk, struct ds
        struct dst_entry *old_dst;
  
        sk_tx_queue_clear(sk);
 +      sk->sk_dst_pending_confirm = 0;
        /*
         * This can be called while sk is owned by the caller only,
         * with no state that can be checked in a rcu_dereference_check() cond
@@@ -1794,7 -1786,6 +1794,7 @@@ sk_dst_set(struct sock *sk, struct dst_
        struct dst_entry *old_dst;
  
        sk_tx_queue_clear(sk);
 +      sk->sk_dst_pending_confirm = 0;
        old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
        dst_release(old_dst);
  }
@@@ -1815,26 -1806,6 +1815,26 @@@ struct dst_entry *__sk_dst_check(struc
  
  struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
  
 +static inline void sk_dst_confirm(struct sock *sk)
 +{
 +      if (!sk->sk_dst_pending_confirm)
 +              sk->sk_dst_pending_confirm = 1;
 +}
 +
 +static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
 +{
 +      if (skb_get_dst_pending_confirm(skb)) {
 +              struct sock *sk = skb->sk;
 +              unsigned long now = jiffies;
 +
 +              /* avoid dirtying neighbour */
 +              if (n->confirmed != now)
 +                      n->confirmed = now;
 +              if (sk && sk->sk_dst_pending_confirm)
 +                      sk->sk_dst_pending_confirm = 0;
 +      }
 +}
 +
  bool sk_mc_loop(struct sock *sk);
  
  static inline bool sk_can_gso(const struct sock *sk)
@@@ -2035,7 -2006,9 +2035,9 @@@ void sk_reset_timer(struct sock *sk, st
  void sk_stop_timer(struct sock *sk, struct timer_list *timer);
  
  int __sk_queue_drop_skb(struct sock *sk, struct sk_buff *skb,
-                       unsigned int flags);
+                       unsigned int flags,
+                       void (*destructor)(struct sock *sk,
+                                          struct sk_buff *skb));
  int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
  int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
  
diff --combined include/uapi/linux/seg6.h
index 33496595064c1e5a7b656e06347aae5ef4e54562,052799e4d751c805de01bc8ed47c3d0a1ecde936..61df8d392f41f4c197419b2260481d1a41897318
@@@ -14,8 -14,6 +14,8 @@@
  #ifndef _UAPI_LINUX_SEG6_H
  #define _UAPI_LINUX_SEG6_H
  
 +#include <linux/types.h>
 +
  /*
   * SRH
   */
@@@ -25,14 -23,12 +25,12 @@@ struct ipv6_sr_hdr 
        __u8    type;
        __u8    segments_left;
        __u8    first_segment;
-       __u8    flag_1;
-       __u8    flag_2;
-       __u8    reserved;
+       __u8    flags;
+       __u16   reserved;
  
        struct in6_addr segments[0];
  };
  
- #define SR6_FLAG1_CLEANUP     (1 << 7)
  #define SR6_FLAG1_PROTECTED   (1 << 6)
  #define SR6_FLAG1_OAM         (1 << 5)
  #define SR6_FLAG1_ALERT               (1 << 4)
@@@ -44,8 -40,7 +42,7 @@@
  #define SR6_TLV_PADDING               4
  #define SR6_TLV_HMAC          5
  
- #define sr_has_cleanup(srh) ((srh)->flag_1 & SR6_FLAG1_CLEANUP)
- #define sr_has_hmac(srh) ((srh)->flag_1 & SR6_FLAG1_HMAC)
+ #define sr_has_hmac(srh) ((srh)->flags & SR6_FLAG1_HMAC)
  
  struct sr6_tlv {
        __u8 type;
diff --combined net/core/dev.c
index 3e1a60102e64d1040867181a03605b8eca508557,29101c98399f40b6b8e42c31a255d8f1fb6bd7a1..0921609dfa81b70a13e0b4ca7852fde6ded7ed82
@@@ -1695,24 -1695,19 +1695,19 @@@ EXPORT_SYMBOL_GPL(net_dec_egress_queue)
  
  static struct static_key netstamp_needed __read_mostly;
  #ifdef HAVE_JUMP_LABEL
- /* We are not allowed to call static_key_slow_dec() from irq context
-  * If net_disable_timestamp() is called from irq context, defer the
-  * static_key_slow_dec() calls.
-  */
  static atomic_t netstamp_needed_deferred;
- #endif
- void net_enable_timestamp(void)
+ static void netstamp_clear(struct work_struct *work)
  {
- #ifdef HAVE_JUMP_LABEL
        int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
  
-       if (deferred) {
-               while (--deferred)
-                       static_key_slow_dec(&netstamp_needed);
-               return;
-       }
+       while (deferred--)
+               static_key_slow_dec(&netstamp_needed);
+ }
+ static DECLARE_WORK(netstamp_work, netstamp_clear);
  #endif
+ void net_enable_timestamp(void)
+ {
        static_key_slow_inc(&netstamp_needed);
  }
  EXPORT_SYMBOL(net_enable_timestamp);
  void net_disable_timestamp(void)
  {
  #ifdef HAVE_JUMP_LABEL
-       if (in_interrupt()) {
-               atomic_inc(&netstamp_needed_deferred);
-               return;
-       }
- #endif
+       /* net_disable_timestamp() can be called from non process context */
+       atomic_inc(&netstamp_needed_deferred);
+       schedule_work(&netstamp_work);
+ #else
        static_key_slow_dec(&netstamp_needed);
+ #endif
  }
  EXPORT_SYMBOL(net_disable_timestamp);
  
@@@ -2408,6 -2403,28 +2403,6 @@@ void netif_schedule_queue(struct netdev
  }
  EXPORT_SYMBOL(netif_schedule_queue);
  
 -/**
 - *    netif_wake_subqueue - allow sending packets on subqueue
 - *    @dev: network device
 - *    @queue_index: sub queue index
 - *
 - * Resume individual transmit queue of a device with multiple transmit queues.
 - */
 -void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 -{
 -      struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
 -
 -      if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
 -              struct Qdisc *q;
 -
 -              rcu_read_lock();
 -              q = rcu_dereference(txq->qdisc);
 -              __netif_schedule(q);
 -              rcu_read_unlock();
 -      }
 -}
 -EXPORT_SYMBOL(netif_wake_subqueue);
 -
  void netif_tx_wake_queue(struct netdev_queue *dev_queue)
  {
        if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
@@@ -2637,10 -2654,9 +2632,10 @@@ EXPORT_SYMBOL(skb_mac_gso_segment)
  static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
  {
        if (tx_path)
 -              return skb->ip_summed != CHECKSUM_PARTIAL;
 -      else
 -              return skb->ip_summed == CHECKSUM_NONE;
 +              return skb->ip_summed != CHECKSUM_PARTIAL &&
 +                     skb->ip_summed != CHECKSUM_NONE;
 +
 +      return skb->ip_summed == CHECKSUM_NONE;
  }
  
  /**
  struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
                                  netdev_features_t features, bool tx_path)
  {
 +      struct sk_buff *segs;
 +
        if (unlikely(skb_needs_check(skb, tx_path))) {
                int err;
  
 -              skb_warn_bad_offload(skb);
 -
 +              /* We're going to init ->check field in TCP or UDP header */
                err = skb_cow_head(skb, 0);
                if (err < 0)
                        return ERR_PTR(err);
        skb_reset_mac_header(skb);
        skb_reset_mac_len(skb);
  
 -      return skb_mac_gso_segment(skb, features);
 +      segs = skb_mac_gso_segment(skb, features);
 +
 +      if (unlikely(skb_needs_check(skb, tx_path)))
 +              skb_warn_bad_offload(skb);
 +
 +      return segs;
  }
  EXPORT_SYMBOL(__skb_gso_segment);
  
@@@ -3138,7 -3148,9 +3133,7 @@@ sch_handle_egress(struct sk_buff *skb, 
        if (!cl)
                return skb;
  
 -      /* skb->tc_verd and qdisc_skb_cb(skb)->pkt_len were already set
 -       * earlier by the caller.
 -       */
 +      /* qdisc_skb_cb(skb)->pkt_len was already set by the caller. */
        qdisc_bstats_cpu_update(cl->q, skb);
  
        switch (tc_classify(skb, cl, &cl_res, false)) {
@@@ -3303,7 -3315,7 +3298,7 @@@ static int __dev_queue_xmit(struct sk_b
  
        qdisc_pkt_len_init(skb);
  #ifdef CONFIG_NET_CLS_ACT
 -      skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
 +      skb->tc_at_ingress = 0;
  # ifdef CONFIG_NET_EGRESS
        if (static_key_false(&egress_needed)) {
                skb = sch_handle_egress(skb, &rc, dev);
@@@ -3410,11 -3422,7 +3405,11 @@@ EXPORT_SYMBOL(netdev_max_backlog)
  
  int netdev_tstamp_prequeue __read_mostly = 1;
  int netdev_budget __read_mostly = 300;
 -int weight_p __read_mostly = 64;            /* old backlog weight */
 +int weight_p __read_mostly = 64;           /* old backlog weight */
 +int dev_weight_rx_bias __read_mostly = 1;  /* bias for backlog weight */
 +int dev_weight_tx_bias __read_mostly = 1;  /* bias for output_queue quota */
 +int dev_rx_weight __read_mostly = 64;
 +int dev_tx_weight __read_mostly = 64;
  
  /* Called with irq disabled */
  static inline void ____napi_schedule(struct softnet_data *sd,
@@@ -3903,7 -3911,7 +3898,7 @@@ sch_handle_ingress(struct sk_buff *skb
        }
  
        qdisc_skb_cb(skb)->pkt_len = skb->len;
 -      skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
 +      skb->tc_at_ingress = 1;
        qdisc_bstats_cpu_update(cl->q, skb);
  
        switch (tc_classify(skb, cl, &cl_res, false)) {
@@@ -3968,7 -3976,9 +3963,7 @@@ int netdev_rx_handler_register(struct n
                               rx_handler_func_t *rx_handler,
                               void *rx_handler_data)
  {
 -      ASSERT_RTNL();
 -
 -      if (dev->rx_handler)
 +      if (netdev_is_rx_handler_busy(dev))
                return -EBUSY;
  
        /* Note: rx_handler_data must be set before rx_handler */
@@@ -4074,8 -4084,12 +4069,8 @@@ another_round
                        goto out;
        }
  
 -#ifdef CONFIG_NET_CLS_ACT
 -      if (skb->tc_verd & TC_NCLS) {
 -              skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
 -              goto ncls;
 -      }
 -#endif
 +      if (skb_skip_tc_classify(skb))
 +              goto skip_classify;
  
        if (pfmemalloc)
                goto skip_taps;
@@@ -4103,8 -4117,10 +4098,8 @@@ skip_taps
                        goto out;
        }
  #endif
 -#ifdef CONFIG_NET_CLS_ACT
 -      skb->tc_verd = 0;
 -ncls:
 -#endif
 +      skb_reset_tc(skb);
 +skip_classify:
        if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
                goto drop;
  
@@@ -4600,7 -4616,6 +4595,7 @@@ static gro_result_t napi_skb_finish(gro
        case GRO_MERGED_FREE:
                if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
                        skb_dst_drop(skb);
 +                      secpath_reset(skb);
                        kmem_cache_free(skbuff_head_cache, skb);
                } else {
                        __kfree_skb(skb);
@@@ -4641,7 -4656,6 +4636,7 @@@ static void napi_reuse_skb(struct napi_
        skb->encapsulation = 0;
        skb_shinfo(skb)->gso_type = 0;
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 +      secpath_reset(skb);
  
        napi->skb = skb;
  }
@@@ -4816,7 -4830,7 +4811,7 @@@ static int process_backlog(struct napi_
                net_rps_action_and_irq_enable(sd);
        }
  
 -      napi->weight = weight_p;
 +      napi->weight = dev_rx_weight;
        while (again) {
                struct sk_buff *skb;
  
@@@ -4883,6 -4897,23 +4878,6 @@@ void __napi_schedule_irqoff(struct napi
  }
  EXPORT_SYMBOL(__napi_schedule_irqoff);
  
 -bool __napi_complete(struct napi_struct *n)
 -{
 -      BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
 -
 -      /* Some drivers call us directly, instead of calling
 -       * napi_complete_done().
 -       */
 -      if (unlikely(test_bit(NAPI_STATE_IN_BUSY_POLL, &n->state)))
 -              return false;
 -
 -      list_del_init(&n->poll_list);
 -      smp_mb__before_atomic();
 -      clear_bit(NAPI_STATE_SCHED, &n->state);
 -      return true;
 -}
 -EXPORT_SYMBOL(__napi_complete);
 -
  bool napi_complete_done(struct napi_struct *n, int work_done)
  {
        unsigned long flags;
                else
                        napi_gro_flush(n, false);
        }
 -      if (likely(list_empty(&n->poll_list))) {
 -              WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
 -      } else {
 +      if (unlikely(!list_empty(&n->poll_list))) {
                /* If n->poll_list is not empty, we need to mask irqs */
                local_irq_save(flags);
 -              __napi_complete(n);
 +              list_del_init(&n->poll_list);
                local_irq_restore(flags);
        }
 +      WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
        return true;
  }
  EXPORT_SYMBOL(napi_complete_done);
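
With __napi_complete() removed above, the expected driver pattern is to let napi_complete_done() do the completion: it flushes GRO, handles the busy-poll state, and clears NAPI_STATE_SCHED itself. A sketch of the conventional poll callback, where mydrv_poll() and my_rx_clean() are hypothetical driver code:

	/* Sketch only: my_rx_clean() is a hypothetical RX handler that
	 * returns how many packets it processed, at most budget. */
	static int mydrv_poll(struct napi_struct *napi, int budget)
	{
		int work_done = my_rx_clean(napi, budget);

		/* Only complete when under budget; napi_complete_done()
		 * flushes GRO and clears NAPI_STATE_SCHED for us. */
		if (work_done < budget)
			napi_complete_done(napi, work_done);

		return work_done;
	}
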
@@@ -4961,6 -4993,7 +4956,6 @@@ bool sk_busy_loop(struct sock *sk, int 
  {
        unsigned long end_time = !nonblock ? sk_busy_loop_end_time(sk) : 0;
        int (*napi_poll)(struct napi_struct *napi, int budget);
 -      int (*busy_poll)(struct napi_struct *dev);
        void *have_poll_lock = NULL;
        struct napi_struct *napi;
        int rc;
@@@ -4975,10 -5008,17 +4970,10 @@@ restart
        if (!napi)
                goto out;
  
 -      /* Note: ndo_busy_poll method is optional in linux-4.5 */
 -      busy_poll = napi->dev->netdev_ops->ndo_busy_poll;
 -
        preempt_disable();
        for (;;) {
                rc = 0;
                local_bh_disable();
 -              if (busy_poll) {
 -                      rc = busy_poll(napi);
 -                      goto count;
 -              }
                if (!napi_poll) {
                        unsigned long val = READ_ONCE(napi->state);
  
@@@ -6111,6 -6151,50 +6106,6 @@@ void netdev_lower_state_changed(struct 
  }
  EXPORT_SYMBOL(netdev_lower_state_changed);
  
 -int netdev_default_l2upper_neigh_construct(struct net_device *dev,
 -                                         struct neighbour *n)
 -{
 -      struct net_device *lower_dev, *stop_dev;
 -      struct list_head *iter;
 -      int err;
 -
 -      netdev_for_each_lower_dev(dev, lower_dev, iter) {
 -              if (!lower_dev->netdev_ops->ndo_neigh_construct)
 -                      continue;
 -              err = lower_dev->netdev_ops->ndo_neigh_construct(lower_dev, n);
 -              if (err) {
 -                      stop_dev = lower_dev;
 -                      goto rollback;
 -              }
 -      }
 -      return 0;
 -
 -rollback:
 -      netdev_for_each_lower_dev(dev, lower_dev, iter) {
 -              if (lower_dev == stop_dev)
 -                      break;
 -              if (!lower_dev->netdev_ops->ndo_neigh_destroy)
 -                      continue;
 -              lower_dev->netdev_ops->ndo_neigh_destroy(lower_dev, n);
 -      }
 -      return err;
 -}
 -EXPORT_SYMBOL_GPL(netdev_default_l2upper_neigh_construct);
 -
 -void netdev_default_l2upper_neigh_destroy(struct net_device *dev,
 -                                        struct neighbour *n)
 -{
 -      struct net_device *lower_dev;
 -      struct list_head *iter;
 -
 -      netdev_for_each_lower_dev(dev, lower_dev, iter) {
 -              if (!lower_dev->netdev_ops->ndo_neigh_destroy)
 -                      continue;
 -              lower_dev->netdev_ops->ndo_neigh_destroy(lower_dev, n);
 -      }
 -}
 -EXPORT_SYMBOL_GPL(netdev_default_l2upper_neigh_destroy);
 -
  static void dev_change_rx_flags(struct net_device *dev, int flags)
  {
        const struct net_device_ops *ops = dev->netdev_ops;
@@@ -6887,6 -6971,13 +6882,6 @@@ static netdev_features_t netdev_fix_fea
                features &= ~dev->gso_partial_features;
        }
  
 -#ifdef CONFIG_NET_RX_BUSY_POLL
 -      if (dev->netdev_ops->ndo_busy_poll)
 -              features |= NETIF_F_BUSY_POLL;
 -      else
 -#endif
 -              features &= ~NETIF_F_BUSY_POLL;
 -
        return features;
  }
  
diff --combined net/core/ethtool.c
index d5f412b3093d4b3ccef40d4808bedf353f483594,d92de0a1f0a49d51ec8329c65d46a4f2ae304ebd..be7bab1adcde3d2f3228191163c55ca898d08e2f
@@@ -102,6 -102,7 +102,6 @@@ static const char netdev_features_strin
        [NETIF_F_RXFCS_BIT] =            "rx-fcs",
        [NETIF_F_RXALL_BIT] =            "rx-all",
        [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload",
 -      [NETIF_F_BUSY_POLL_BIT] =        "busy-poll",
        [NETIF_F_HW_TC_BIT] =            "hw-tc-offload",
  };
  
@@@ -1404,9 -1405,12 +1404,12 @@@ static int ethtool_get_regs(struct net_
        if (regs.len > reglen)
                regs.len = reglen;
  
-       regbuf = vzalloc(reglen);
-       if (reglen && !regbuf)
-               return -ENOMEM;
+       regbuf = NULL;
+       if (reglen) {
+               regbuf = vzalloc(reglen);
+               if (!regbuf)
+                       return -ENOMEM;
+       }
  
        ops->get_regs(dev, &regs, regbuf);
  
@@@ -1816,13 -1820,11 +1819,13 @@@ static int ethtool_get_strings(struct n
        ret = __ethtool_get_sset_count(dev, gstrings.string_set);
        if (ret < 0)
                return ret;
 +      if (ret > S32_MAX / ETH_GSTRING_LEN)
 +              return -ENOMEM;
 +      WARN_ON_ONCE(!ret);
  
        gstrings.len = ret;
 -
 -      data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
 -      if (!data)
 +      data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
 +      if (gstrings.len && !data)
                return -ENOMEM;
  
        __ethtool_get_strings(dev, gstrings.string_set, data);
        if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
                goto out;
        useraddr += sizeof(gstrings);
 -      if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
 +      if (gstrings.len &&
 +          copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
                goto out;
        ret = 0;
  
  out:
 -      kfree(data);
 +      vfree(data);
        return ret;
  }
  
@@@ -1914,15 -1915,14 +1917,15 @@@ static int ethtool_get_stats(struct net
        n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
        if (n_stats < 0)
                return n_stats;
 -      WARN_ON(n_stats == 0);
 -
 +      if (n_stats > S32_MAX / sizeof(u64))
 +              return -ENOMEM;
 +      WARN_ON_ONCE(!n_stats);
        if (copy_from_user(&stats, useraddr, sizeof(stats)))
                return -EFAULT;
  
        stats.n_stats = n_stats;
 -      data = kmalloc(n_stats * sizeof(u64), GFP_USER);
 -      if (!data)
 +      data = vzalloc(n_stats * sizeof(u64));
 +      if (n_stats && !data)
                return -ENOMEM;
  
        ops->get_ethtool_stats(dev, &stats, data);
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
                goto out;
        useraddr += sizeof(stats);
 -      if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
 +      if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
                goto out;
        ret = 0;
  
   out:
 -      kfree(data);
 +      vfree(data);
        return ret;
  }
  
@@@ -1951,18 -1951,17 +1954,18 @@@ static int ethtool_get_phy_stats(struc
                return -EOPNOTSUPP;
  
        n_stats = phy_get_sset_count(phydev);
 -
        if (n_stats < 0)
                return n_stats;
 -      WARN_ON(n_stats == 0);
 +      if (n_stats > S32_MAX / sizeof(u64))
 +              return -ENOMEM;
 +      WARN_ON_ONCE(!n_stats);
  
        if (copy_from_user(&stats, useraddr, sizeof(stats)))
                return -EFAULT;
  
        stats.n_stats = n_stats;
 -      data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
 -      if (!data)
 +      data = vzalloc(n_stats * sizeof(u64));
 +      if (n_stats && !data)
                return -ENOMEM;
  
        mutex_lock(&phydev->lock);
        if (copy_to_user(useraddr, &stats, sizeof(stats)))
                goto out;
        useraddr += sizeof(stats);
 -      if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
 +      if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
                goto out;
        ret = 0;
  
   out:
 -      kfree(data);
 +      vfree(data);
        return ret;
  }
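
The three ethtool stats/strings hunks apply one pattern: bound the driver-reported count before multiplying, allocate with vzalloc() so large tables neither fail spuriously nor abuse kmalloc, and skip copy_to_user() for a zero-length result. A condensed sketch of that guard, using the names from the hunks above:

	n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
	if (n_stats < 0)
		return n_stats;
	if (n_stats > S32_MAX / sizeof(u64))   /* n_stats * sizeof(u64) cannot overflow */
		return -ENOMEM;
	WARN_ON_ONCE(!n_stats);                /* zero is odd but survivable now */

	data = vzalloc(n_stats * sizeof(u64)); /* vmalloc-backed, may be large */
	if (n_stats && !data)                  /* only fail if memory was truly needed */
		return -ENOMEM;
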
  
diff --combined net/ipv4/ip_sockglue.c
index 8a4409dd390aac7fcb88383af1550a2f967dc3df,900011709e3b8e4807daaa6bf537c3871a7d9306..ce1386a67e2434203fdffe126958483ffc69fabd
@@@ -272,7 -272,7 +272,7 @@@ int ip_cmsg_send(struct sock *sk, struc
                        continue;
                switch (cmsg->cmsg_type) {
                case IP_RETOPTS:
 -                      err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
 +                      err = cmsg->cmsg_len - sizeof(struct cmsghdr);
  
                        /* Our caller is responsible for freeing ipc->opt */
                        err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
@@@ -843,7 -843,6 +843,7 @@@ static int do_ip_setsockopt(struct soc
        {
                struct ip_mreqn mreq;
                struct net_device *dev = NULL;
 +              int midx;
  
                if (sk->sk_type == SOCK_STREAM)
                        goto e_inval;
                err = -EADDRNOTAVAIL;
                if (!dev)
                        break;
 +
 +              midx = l3mdev_master_ifindex(dev);
 +
                dev_put(dev);
  
                err = -EINVAL;
                if (sk->sk_bound_dev_if &&
 -                  mreq.imr_ifindex != sk->sk_bound_dev_if)
 +                  mreq.imr_ifindex != sk->sk_bound_dev_if &&
 +                  (!midx || midx != sk->sk_bound_dev_if))
                        break;
  
                inet->mc_index = mreq.imr_ifindex;
@@@ -1243,7 -1238,14 +1243,14 @@@ void ipv4_pktinfo_prepare(const struct 
                pktinfo->ipi_ifindex = 0;
                pktinfo->ipi_spec_dst.s_addr = 0;
        }
-       skb_dst_drop(skb);
+       /* We need to keep the dst for __ip_options_echo()
+        * We could restrict the test to opt.ts_needtime || opt.srr,
+        * but the following is good enough as IP options are not often used.
+        */
+       if (unlikely(IPCB(skb)->opt.optlen))
+               skb_dst_force(skb);
+       else
+               skb_dst_drop(skb);
  }
  
  int ip_setsockopt(struct sock *sk, int level,
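
ip_cmsg_send() now derives the IP_RETOPTS option length as cmsg_len minus sizeof(struct cmsghdr), matching how CMSG_LEN() builds cmsg_len in the first place. For context, a hedged sketch of the userspace side that produces such a control message (send_with_opts() and the opts blob are hypothetical; opts must already be validly encoded IP options):

	#include <string.h>
	#include <sys/types.h>
	#include <sys/socket.h>
	#include <sys/uio.h>
	#include <netinet/in.h>

	static ssize_t send_with_opts(int fd, const struct sockaddr_in *dst,
				      const void *payload, size_t plen,
				      const unsigned char *opts, size_t optlen)
	{
		char cbuf[CMSG_SPACE(40)];          /* IP options are at most 40 bytes */
		struct iovec iov = { (void *)payload, plen };
		struct msghdr msg;
		struct cmsghdr *cm;

		if (optlen > 40)
			return -1;

		memset(&msg, 0, sizeof(msg));
		msg.msg_name = (void *)dst;
		msg.msg_namelen = sizeof(*dst);
		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = cbuf;
		msg.msg_controllen = CMSG_SPACE(optlen);

		cm = CMSG_FIRSTHDR(&msg);
		cm->cmsg_level = IPPROTO_IP;
		cm->cmsg_type = IP_RETOPTS;
		cm->cmsg_len = CMSG_LEN(optlen);    /* header + optlen; the kernel
						     * recovers optlen from this */
		memcpy(CMSG_DATA(cm), opts, optlen);

		return sendmsg(fd, &msg, 0);
	}
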
diff --combined net/ipv4/tcp.c
index b751abc56935efa4e7e3ca9aa71e478f60b4b3fd,0efb4c7f6704f662b6c762e48698a41564add2a4..d44a6989e76d69aa44e2a26d37b4204376c94966
@@@ -406,6 -406,7 +406,6 @@@ void tcp_init_sock(struct sock *sk
        tp->mss_cache = TCP_MSS_DEFAULT;
  
        tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
 -      tcp_enable_early_retrans(tp);
        tcp_assign_congestion_control(sk);
  
        tp->tsoffset = 0;
        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];
  
 -      local_bh_disable();
        sk_sockets_allocated_inc(sk);
 -      local_bh_enable();
  }
  EXPORT_SYMBOL(tcp_init_sock);
  
  static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb)
  {
 -      if (tsflags) {
 +      if (tsflags && skb) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);
                struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
  
@@@ -533,12 -536,6 +533,12 @@@ unsigned int tcp_poll(struct file *file
  
                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
 +      } else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
 +              /* Active TCP fastopen socket with defer_connect
 +               * Return POLLOUT so application can call write()
 +               * in order for kernel to generate SYN+data
 +               */
 +              mask |= POLLOUT | POLLWRNORM;
        }
        /* This barrier is coupled with smp_wmb() in tcp_reset() */
        smp_rmb();
@@@ -773,6 -770,12 +773,12 @@@ ssize_t tcp_splice_read(struct socket *
                                ret = -EAGAIN;
                                break;
                        }
+                       /* if __tcp_splice_read() got nothing while we have
+                        * an skb in receive queue, we do not want to loop.
+                        * This might happen with URG data.
+                        */
+                       if (!skb_queue_empty(&sk->sk_receive_queue))
+                               break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
@@@ -961,8 -964,10 +967,8 @@@ new_segment
                copied += copy;
                offset += copy;
                size -= copy;
 -              if (!size) {
 -                      tcp_tx_timestamp(sk, sk->sk_tsflags, skb);
 +              if (!size)
                        goto out;
 -              }
  
                if (skb->len < size_goal || (flags & MSG_OOB))
                        continue;
@@@ -988,11 -993,8 +994,11 @@@ wait_for_memory
        }
  
  out:
 -      if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
 -              tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
 +      if (copied) {
 +              tcp_tx_timestamp(sk, sk->sk_tsflags, tcp_write_queue_tail(sk));
 +              if (!(flags & MSG_SENDPAGE_NOTLAST))
 +                      tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
 +      }
        return copied;
  
  do_error:
@@@ -1077,7 -1079,6 +1083,7 @@@ static int tcp_sendmsg_fastopen(struct 
                                int *copied, size_t size)
  {
        struct tcp_sock *tp = tcp_sk(sk);
 +      struct inet_sock *inet = inet_sk(sk);
        int err, flags;
  
        if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
        tp->fastopen_req->data = msg;
        tp->fastopen_req->size = size;
  
 +      if (inet->defer_connect) {
 +              err = tcp_connect(sk);
 +              /* Same failure procedure as in tcp_v4/6_connect */
 +              if (err) {
 +                      tcp_set_state(sk, TCP_CLOSE);
 +                      inet->inet_dport = 0;
 +                      sk->sk_route_caps = 0;
 +              }
 +      }
        flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
        err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
 -                                  msg->msg_namelen, flags);
 +                                  msg->msg_namelen, flags, 1);
 +      inet->defer_connect = 0;
        *copied = tp->fastopen_req->copied;
        tcp_free_fastopen_req(tp);
        return err;
@@@ -1124,7 -1115,7 +1130,7 @@@ int tcp_sendmsg(struct sock *sk, struc
        lock_sock(sk);
  
        flags = msg->msg_flags;
 -      if (flags & MSG_FASTOPEN) {
 +      if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
                err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
                if (err == -EINPROGRESS && copied_syn > 0)
                        goto out;
@@@ -1296,6 -1287,7 +1302,6 @@@ new_segment
  
                copied += copy;
                if (!msg_data_left(msg)) {
 -                      tcp_tx_timestamp(sk, sockc.tsflags, skb);
                        if (unlikely(flags & MSG_EOR))
                                TCP_SKB_CB(skb)->eor = 1;
                        goto out;
@@@ -1326,10 -1318,8 +1332,10 @@@ wait_for_memory
        }
  
  out:
 -      if (copied)
 +      if (copied) {
 +              tcp_tx_timestamp(sk, sockc.tsflags, tcp_write_queue_tail(sk));
                tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
 +      }
  out_nopush:
        release_sock(sk);
        return copied + copied_syn;
@@@ -2489,6 -2479,11 +2495,6 @@@ static int do_tcp_setsockopt(struct soc
        case TCP_THIN_DUPACK:
                if (val < 0 || val > 1)
                        err = -EINVAL;
 -              else {
 -                      tp->thin_dupack = val;
 -                      if (tp->thin_dupack)
 -                              tcp_disable_early_retrans(tp);
 -              }
                break;
  
        case TCP_REPAIR:
                        err = -EINVAL;
                }
                break;
 +      case TCP_FASTOPEN_CONNECT:
 +              if (val > 1 || val < 0) {
 +                      err = -EINVAL;
 +              } else if (sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
 +                      if (sk->sk_state == TCP_CLOSE)
 +                              tp->fastopen_connect = val;
 +                      else
 +                              err = -EINVAL;
 +              } else {
 +                      err = -EOPNOTSUPP;
 +              }
 +              break;
        case TCP_TIMESTAMP:
                if (!tp->repair)
                        err = -EPERM;
@@@ -2787,9 -2770,6 +2793,9 @@@ void tcp_get_info(struct sock *sk, stru
                info->tcpi_sacked = sk->sk_max_ack_backlog;
                return;
        }
 +
 +      slow = lock_sock_fast(sk);
 +
        info->tcpi_ca_state = icsk->icsk_ca_state;
        info->tcpi_retransmits = icsk->icsk_retransmits;
        info->tcpi_probes = icsk->icsk_probes_out;
  
        info->tcpi_total_retrans = tp->total_retrans;
  
 -      slow = lock_sock_fast(sk);
 -
        info->tcpi_bytes_acked = tp->bytes_acked;
        info->tcpi_bytes_received = tp->bytes_received;
        info->tcpi_notsent_bytes = max_t(int, 0, tp->write_seq - tp->snd_nxt);
        tcp_get_info_chrono_stats(tp, info);
  
 -      unlock_sock_fast(sk, slow);
 -
        info->tcpi_segs_out = tp->segs_out;
        info->tcpi_segs_in = tp->segs_in;
  
                do_div(rate64, intv);
                info->tcpi_delivery_rate = rate64;
        }
 +      unlock_sock_fast(sk, slow);
  }
  EXPORT_SYMBOL_GPL(tcp_get_info);
  
@@@ -2870,7 -2853,7 +2876,7 @@@ struct sk_buff *tcp_get_timestamping_op
        struct sk_buff *stats;
        struct tcp_info info;
  
 -      stats = alloc_skb(3 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC);
 +      stats = alloc_skb(5 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC);
        if (!stats)
                return NULL;
  
                          info.tcpi_rwnd_limited, TCP_NLA_PAD);
        nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
                          info.tcpi_sndbuf_limited, TCP_NLA_PAD);
 +      nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
 +                        tp->data_segs_out, TCP_NLA_PAD);
 +      nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
 +                        tp->total_retrans, TCP_NLA_PAD);
        return stats;
  }
  
@@@ -2994,9 -2973,8 +3000,9 @@@ static int do_tcp_getsockopt(struct soc
        case TCP_THIN_LINEAR_TIMEOUTS:
                val = tp->thin_lto;
                break;
 +
        case TCP_THIN_DUPACK:
 -              val = tp->thin_dupack;
 +              val = 0;
                break;
  
        case TCP_REPAIR:
                val = icsk->icsk_accept_queue.fastopenq.max_qlen;
                break;
  
 +      case TCP_FASTOPEN_CONNECT:
 +              val = tp->fastopen_connect;
 +              break;
 +
        case TCP_TIMESTAMP:
                val = tcp_time_stamp + tp->tsoffset;
                break;
@@@ -3366,7 -3340,6 +3372,7 @@@ void __init tcp_init(void
  
        percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL);
        percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
 +      inet_hashinfo_init(&tcp_hashinfo);
        tcp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("tcp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
  
  
        cnt = tcp_hashinfo.ehash_mask + 1;
 -
 -      tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
        sysctl_tcp_max_orphans = cnt / 2;
 -      sysctl_max_syn_backlog = max(128, cnt / 256);
  
        tcp_init_mem();
        /* Set per-socket limits to no more than 1/128 the pressure threshold */
        pr_info("Hash tables configured (established %u bind %u)\n",
                tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
  
 +      tcp_v4_init();
        tcp_metrics_init();
        BUG_ON(tcp_register_congestion_control(&tcp_reno) != 0);
        tcp_tasklet_init();
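
The defer_connect/TCP_FASTOPEN_CONNECT additions above give clients a Fast Open path that keeps the normal connect()+send() call sequence: with the option set, connect() only records the destination, and the first send() emits the SYN (carrying cookie and data when possible). A hedged userspace sketch, assuming a TFO-enabled peer at addr; the option value 30 matches this tree's uapi tcp.h:

	#include <stddef.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>

	#ifndef TCP_FASTOPEN_CONNECT
	#define TCP_FASTOPEN_CONNECT 30   /* from this kernel's uapi headers */
	#endif

	int tfo_client(const struct sockaddr_in *addr, const char *req, size_t len)
	{
		int one = 1;
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (fd < 0)
			return -1;
		/* Defer the SYN: connect() only records the destination... */
		if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
			       &one, sizeof(one)) < 0 ||
		    connect(fd, (const struct sockaddr *)addr, sizeof(*addr)) < 0) {
			close(fd);
			return -1;
		}
		/* ...and the first send() emits SYN (+ cookie) with the data. */
		if (send(fd, req, len, 0) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}
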
diff --combined net/ipv4/udp.c
index 4a1ba04565d11539d2be1fd6de968e826eb5cbce,8aab7d78d25bc6eaa42dcc960cdbd5086f614cad..ea6e4cff9fafe99af23fd8ea666cd979d5af9104
@@@ -134,21 -134,14 +134,21 @@@ EXPORT_SYMBOL(udp_memory_allocated)
  #define MAX_UDP_PORTS 65536
  #define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
  
 +/* IPCB reference means this can not be used from early demux */
 +static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
 +{
 +#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
 +      if (!net->ipv4.sysctl_udp_l3mdev_accept &&
 +          skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
 +              return true;
 +#endif
 +      return false;
 +}
 +
  static int udp_lib_lport_inuse(struct net *net, __u16 num,
                               const struct udp_hslot *hslot,
                               unsigned long *bitmap,
 -                             struct sock *sk,
 -                             int (*saddr_comp)(const struct sock *sk1,
 -                                               const struct sock *sk2,
 -                                               bool match_wildcard),
 -                             unsigned int log)
 +                             struct sock *sk, unsigned int log)
  {
        struct sock *sk2;
        kuid_t uid = sock_i_uid(sk);
                    (!sk2->sk_reuse || !sk->sk_reuse) &&
                    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
                     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
 -                  (!sk2->sk_reuseport || !sk->sk_reuseport ||
 -                   rcu_access_pointer(sk->sk_reuseport_cb) ||
 -                   !uid_eq(uid, sock_i_uid(sk2))) &&
 -                  saddr_comp(sk, sk2, true)) {
 -                      if (!bitmap)
 -                              return 1;
 -                      __set_bit(udp_sk(sk2)->udp_port_hash >> log, bitmap);
 +                  inet_rcv_saddr_equal(sk, sk2, true)) {
 +                      if (sk2->sk_reuseport && sk->sk_reuseport &&
 +                          !rcu_access_pointer(sk->sk_reuseport_cb) &&
 +                          uid_eq(uid, sock_i_uid(sk2))) {
 +                              if (!bitmap)
 +                                      return 0;
 +                      } else {
 +                              if (!bitmap)
 +                                      return 1;
 +                              __set_bit(udp_sk(sk2)->udp_port_hash >> log,
 +                                        bitmap);
 +                      }
                }
        }
        return 0;
   */
  static int udp_lib_lport_inuse2(struct net *net, __u16 num,
                                struct udp_hslot *hslot2,
 -                              struct sock *sk,
 -                              int (*saddr_comp)(const struct sock *sk1,
 -                                                const struct sock *sk2,
 -                                                bool match_wildcard))
 +                              struct sock *sk)
  {
        struct sock *sk2;
        kuid_t uid = sock_i_uid(sk);
                    (!sk2->sk_reuse || !sk->sk_reuse) &&
                    (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
                     sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
 -                  (!sk2->sk_reuseport || !sk->sk_reuseport ||
 -                   rcu_access_pointer(sk->sk_reuseport_cb) ||
 -                   !uid_eq(uid, sock_i_uid(sk2))) &&
 -                  saddr_comp(sk, sk2, true)) {
 -                      res = 1;
 +                  inet_rcv_saddr_equal(sk, sk2, true)) {
 +                      if (sk2->sk_reuseport && sk->sk_reuseport &&
 +                          !rcu_access_pointer(sk->sk_reuseport_cb) &&
 +                          uid_eq(uid, sock_i_uid(sk2))) {
 +                              res = 0;
 +                      } else {
 +                              res = 1;
 +                      }
                        break;
                }
        }
        return res;
  }
  
 -static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
 -                                int (*saddr_same)(const struct sock *sk1,
 -                                                  const struct sock *sk2,
 -                                                  bool match_wildcard))
 +static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
  {
        struct net *net = sock_net(sk);
        kuid_t uid = sock_i_uid(sk);
                    (udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
                    (sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
                    sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
 -                  (*saddr_same)(sk, sk2, false)) {
 +                  inet_rcv_saddr_equal(sk, sk2, false)) {
                        return reuseport_add_sock(sk, sk2);
                }
        }
   *
   *  @sk:          socket struct in question
   *  @snum:        port number to look up
 - *  @saddr_comp:  AF-dependent comparison of bound local IP addresses
   *  @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
   *                   with NULL address
   */
  int udp_lib_get_port(struct sock *sk, unsigned short snum,
 -                   int (*saddr_comp)(const struct sock *sk1,
 -                                     const struct sock *sk2,
 -                                     bool match_wildcard),
                     unsigned int hash2_nulladdr)
  {
        struct udp_hslot *hslot, *hslot2;
                        bitmap_zero(bitmap, PORTS_PER_CHAIN);
                        spin_lock_bh(&hslot->lock);
                        udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
 -                                          saddr_comp, udptable->log);
 +                                          udptable->log);
  
                        snum = first;
                        /*
                                snum += rand;
                        } while (snum != first);
                        spin_unlock_bh(&hslot->lock);
 +                      cond_resched();
                } while (++first != last);
                goto fail;
        } else {
                        if (hslot->count < hslot2->count)
                                goto scan_primary_hash;
  
 -                      exist = udp_lib_lport_inuse2(net, snum, hslot2,
 -                                                   sk, saddr_comp);
 +                      exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
                        if (!exist && (hash2_nulladdr != slot2)) {
                                hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
                                exist = udp_lib_lport_inuse2(net, snum, hslot2,
 -                                                           sk, saddr_comp);
 +                                                           sk);
                        }
                        if (exist)
                                goto fail_unlock;
                                goto found;
                }
  scan_primary_hash:
 -              if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
 -                                      saddr_comp, 0))
 +              if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
                        goto fail_unlock;
        }
  found:
        udp_sk(sk)->udp_portaddr_hash ^= snum;
        if (sk_unhashed(sk)) {
                if (sk->sk_reuseport &&
 -                  udp_reuseport_add_sock(sk, hslot, saddr_comp)) {
 +                  udp_reuseport_add_sock(sk, hslot)) {
                        inet_sk(sk)->inet_num = 0;
                        udp_sk(sk)->udp_port_hash = 0;
                        udp_sk(sk)->udp_portaddr_hash ^= snum;
@@@ -360,6 -356,24 +360,6 @@@ fail
  }
  EXPORT_SYMBOL(udp_lib_get_port);
  
 -/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 - * match_wildcard == false: addresses must be exactly the same, i.e.
 - *                          0.0.0.0 only equals to 0.0.0.0
 - */
 -int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2,
 -                       bool match_wildcard)
 -{
 -      struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
 -
 -      if (!ipv6_only_sock(sk2)) {
 -              if (inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)
 -                      return 1;
 -              if (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr)
 -                      return match_wildcard;
 -      }
 -      return 0;
 -}
 -
  static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
                              unsigned int port)
  {
@@@ -375,13 -389,12 +375,13 @@@ int udp_v4_get_port(struct sock *sk, un
  
        /* precompute partial secondary hash */
        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
 -      return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
 +      return udp_lib_get_port(sk, snum, hash2_nulladdr);
  }
  
  static int compute_score(struct sock *sk, struct net *net,
                         __be32 saddr, __be16 sport,
 -                       __be32 daddr, unsigned short hnum, int dif)
 +                       __be32 daddr, unsigned short hnum, int dif,
 +                       bool exact_dif)
  {
        int score;
        struct inet_sock *inet;
                score += 4;
        }
  
 -      if (sk->sk_bound_dev_if) {
 +      if (sk->sk_bound_dev_if || exact_dif) {
                if (sk->sk_bound_dev_if != dif)
                        return -1;
                score += 4;
@@@ -437,7 -450,7 +437,7 @@@ static u32 udp_ehashfn(const struct ne
  /* called with rcu_read_lock() */
  static struct sock *udp4_lib_lookup2(struct net *net,
                __be32 saddr, __be16 sport,
 -              __be32 daddr, unsigned int hnum, int dif,
 +              __be32 daddr, unsigned int hnum, int dif, bool exact_dif,
                struct udp_hslot *hslot2,
                struct sk_buff *skb)
  {
        badness = 0;
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
                score = compute_score(sk, net, saddr, sport,
 -                                    daddr, hnum, dif);
 +                                    daddr, hnum, dif, exact_dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@@ -484,7 -497,6 +484,7 @@@ struct sock *__udp4_lib_lookup(struct n
        unsigned short hnum = ntohs(dport);
        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 +      bool exact_dif = udp_lib_exact_dif_match(net, skb);
        int score, badness, matches = 0, reuseport = 0;
        u32 hash = 0;
  
  
                result = udp4_lib_lookup2(net, saddr, sport,
                                          daddr, hnum, dif,
 -                                        hslot2, skb);
 +                                        exact_dif, hslot2, skb);
                if (!result) {
                        unsigned int old_slot2 = slot2;
                        hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
  
                        result = udp4_lib_lookup2(net, saddr, sport,
                                                  daddr, hnum, dif,
 -                                                hslot2, skb);
 +                                                exact_dif, hslot2, skb);
                }
                return result;
        }
@@@ -521,7 -533,7 +521,7 @@@ begin
        badness = 0;
        sk_for_each_rcu(sk, &hslot->head) {
                score = compute_score(sk, net, saddr, sport,
 -                                    daddr, hnum, dif);
 +                                    daddr, hnum, dif, exact_dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@@ -1101,8 -1113,7 +1101,8 @@@ out
        return err;
  
  do_confirm:
 -      dst_confirm(&rt->dst);
 +      if (msg->msg_flags & MSG_PROBE)
 +              dst_confirm_neigh(&rt->dst, &fl4->daddr);
        if (!(msg->msg_flags&MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
@@@ -1490,7 -1501,7 +1490,7 @@@ try_again
        return err;
  
  csum_copy_err:
-       if (!__sk_queue_drop_skb(sk, skb, flags)) {
+       if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
                UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
                UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
        }
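
The udp_lib_lport_inuse() rework above makes bind-time port search treat a slot as free when every conflicting socket is fully reuseport-compatible: both sockets set SO_REUSEPORT, both are owned by the same uid, and the binding socket has no reuseport group attached yet. From userspace that looks like this hedged sketch, where two sockets share one UDP port:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <arpa/inet.h>

	/* Bind a UDP socket with SO_REUSEPORT, so several such sockets from
	 * the same uid can share the port and load-balance incoming packets. */
	static int reuseport_udp(uint16_t port)
	{
		int one = 1;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);
		struct sockaddr_in a;

		if (fd < 0)
			return -1;
		setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));

		memset(&a, 0, sizeof(a));
		a.sin_family = AF_INET;
		a.sin_addr.s_addr = htonl(INADDR_ANY);
		a.sin_port = htons(port);
		return bind(fd, (struct sockaddr *)&a, sizeof(a)) ? -1 : fd;
	}

	int main(void)
	{
		int a = reuseport_udp(9000);
		int b = reuseport_udp(9000);   /* succeeds alongside the first */

		printf("%d %d\n", a, b);
		return 0;
	}
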
diff --combined net/ipv6/addrconf.c
index 156ed578d3c09547d031963b268bb284d593824e,81f7b4ea42813b02cd5aadb1613f2c675969f45f..a69ae7d4e6f8178a3d476c897a459671559ed85a
@@@ -243,7 -243,6 +243,7 @@@ static struct ipv6_devconf ipv6_devcon
        .seg6_require_hmac      = 0,
  #endif
        .enhanced_dad           = 1,
 +      .addr_gen_mode          = IN6_ADDR_GEN_MODE_EUI64,
  };
  
  static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .seg6_require_hmac      = 0,
  #endif
        .enhanced_dad           = 1,
 +      .addr_gen_mode          = IN6_ADDR_GEN_MODE_EUI64,
  };
  
  /* Check if a valid qdisc is available */
@@@ -388,9 -386,9 +388,9 @@@ static struct inet6_dev *ipv6_add_dev(s
        memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
  
        if (ndev->cnf.stable_secret.initialized)
 -              ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
 +              ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
        else
 -              ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
 +              ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
  
        ndev->cnf.mtu6 = dev->mtu;
        ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@@ -2146,14 -2144,12 +2146,14 @@@ static int ipv6_generate_eui64(u8 *eui
        case ARPHRD_SIT:
                return addrconf_ifid_sit(eui, dev);
        case ARPHRD_IPGRE:
 +      case ARPHRD_TUNNEL:
                return addrconf_ifid_gre(eui, dev);
        case ARPHRD_6LOWPAN:
                return addrconf_ifid_eui64(eui, dev);
        case ARPHRD_IEEE1394:
                return addrconf_ifid_ieee1394(eui, dev);
        case ARPHRD_TUNNEL6:
 +      case ARPHRD_IP6GRE:
                return addrconf_ifid_ip6tnl(eui, dev);
        }
        return -1;
@@@ -2391,8 -2387,8 +2391,8 @@@ static void manage_tempaddrs(struct ine
  
  static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
  {
 -      return idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
 -             idev->addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
 +      return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
 +             idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
  }
  
  int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
@@@ -3156,7 -3152,7 +3156,7 @@@ static void addrconf_addr_gen(struct in
  
        ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
  
 -      switch (idev->addr_gen_mode) {
 +      switch (idev->cnf.addr_gen_mode) {
        case IN6_ADDR_GEN_MODE_RANDOM:
                ipv6_gen_mode_random_init(idev);
                /* fallthrough */
@@@ -3197,9 -3193,6 +3197,9 @@@ static void addrconf_dev_config(struct 
            (dev->type != ARPHRD_IEEE1394) &&
            (dev->type != ARPHRD_TUNNEL6) &&
            (dev->type != ARPHRD_6LOWPAN) &&
 +          (dev->type != ARPHRD_IP6GRE) &&
 +          (dev->type != ARPHRD_IPGRE) &&
 +          (dev->type != ARPHRD_TUNNEL) &&
            (dev->type != ARPHRD_NONE)) {
                /* Alas, we support only Ethernet autoconfiguration. */
                return;
  
        /* this device type has no EUI support */
        if (dev->type == ARPHRD_NONE &&
 -          idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
 -              idev->addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
 +          idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
 +              idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
  
        addrconf_addr_gen(idev, false);
  }
@@@ -3393,9 -3386,15 +3393,15 @@@ static int addrconf_notify(struct notif
                        }
  
                        if (idev) {
-                               if (idev->if_flags & IF_READY)
-                                       /* device is already configured. */
+                               if (idev->if_flags & IF_READY) {
+                                       /* device is already configured -
+                                        * but resend MLD reports, we might
+                                        * have roamed and need to update
+                                        * multicast snooping switches
+                                        */
+                                       ipv6_mc_up(idev);
                                        break;
+                               }
                                idev->if_flags |= IF_READY;
                        }
  
@@@ -4895,13 -4894,6 +4901,13 @@@ static void inet6_ifa_notify(int event
        struct net *net = dev_net(ifa->idev->dev);
        int err = -ENOBUFS;
  
 +      /* Don't send DELADDR notification for TENTATIVE address,
 +       * since NEWADDR notification is sent only after removing
 +       * TENTATIVE flag.
 +       */
 +      if (ifa->flags & IFA_F_TENTATIVE && event == RTM_DELADDR)
 +              return;
 +
        skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
        if (!skb)
                goto errout;
@@@ -4989,7 -4981,6 +4995,7 @@@ static inline void ipv6_store_devconf(s
        array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
  #endif
        array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
 +      array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
  }
  
  static inline size_t inet6_ifla6_size(void)
@@@ -5101,7 -5092,7 +5107,7 @@@ static int inet6_fill_ifla6_attrs(struc
        if (!nla)
                goto nla_put_failure;
  
 -      if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode))
 +      if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
                goto nla_put_failure;
  
        read_lock_bh(&idev->lock);
@@@ -5219,26 -5210,6 +5225,26 @@@ static int inet6_validate_link_af(cons
        return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
  }
  
 +static int check_addr_gen_mode(int mode)
 +{
 +      if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
 +          mode != IN6_ADDR_GEN_MODE_NONE &&
 +          mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
 +          mode != IN6_ADDR_GEN_MODE_RANDOM)
 +              return -EINVAL;
 +      return 1;
 +}
 +
 +static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
 +                              int mode)
 +{
 +      if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
 +          !idev->cnf.stable_secret.initialized &&
 +          !net->ipv6.devconf_dflt->stable_secret.initialized)
 +              return -EINVAL;
 +      return 1;
 +}
 +
  static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
  {
        int err = -EINVAL;
        if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
                u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
  
 -              if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
 -                  mode != IN6_ADDR_GEN_MODE_NONE &&
 -                  mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
 -                  mode != IN6_ADDR_GEN_MODE_RANDOM)
 -                      return -EINVAL;
 -
 -              if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
 -                  !idev->cnf.stable_secret.initialized &&
 -                  !dev_net(dev)->ipv6.devconf_dflt->stable_secret.initialized)
 +              if (check_addr_gen_mode(mode) < 0 ||
 +                  check_stable_privacy(idev, dev_net(dev), mode) < 0)
                        return -EINVAL;
  
 -              idev->addr_gen_mode = mode;
 +              idev->cnf.addr_gen_mode = mode;
                err = 0;
        }
  
@@@ -5671,47 -5649,6 +5677,47 @@@ int addrconf_sysctl_proxy_ndp(struct ct
        return ret;
  }
  
 +static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
 +                                       void __user *buffer, size_t *lenp,
 +                                       loff_t *ppos)
 +{
 +      int ret = 0;
 +      int new_val;
 +      struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
 +      struct net *net = (struct net *)ctl->extra2;
 +
 +      ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 +
 +      if (write) {
 +              new_val = *((int *)ctl->data);
 +
 +              if (check_addr_gen_mode(new_val) < 0)
 +                      return -EINVAL;
 +
 +              /* request for default */
 +              if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
 +                      ipv6_devconf_dflt.addr_gen_mode = new_val;
 +
 +              /* request for individual net device */
 +              } else {
 +                      if (!idev)
 +                              return ret;
 +
 +                      if (check_stable_privacy(idev, net, new_val) < 0)
 +                              return -EINVAL;
 +
 +                      if (idev->cnf.addr_gen_mode != new_val) {
 +                              idev->cnf.addr_gen_mode = new_val;
 +                              rtnl_lock();
 +                              addrconf_dev_config(idev->dev);
 +                              rtnl_unlock();
 +                      }
 +              }
 +      }
 +
 +      return ret;
 +}
 +
  static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
                                         void __user *buffer, size_t *lenp,
                                         loff_t *ppos)
                        struct inet6_dev *idev = __in6_dev_get(dev);
  
                        if (idev) {
 -                              idev->addr_gen_mode =
 +                              idev->cnf.addr_gen_mode =
                                        IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
                        }
                }
        } else {
                struct inet6_dev *idev = ctl->extra1;
  
 -              idev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
 +              idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
        }
  
  out:
@@@ -6156,13 -6093,6 +6162,13 @@@ static const struct ctl_table addrconf_
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
 +      {
 +              .procname               = "addr_gen_mode",
 +              .data                   = &ipv6_devconf.addr_gen_mode,
 +              .maxlen                 = sizeof(int),
 +              .mode                   = 0644,
 +              .proc_handler   = addrconf_sysctl_addr_gen_mode,
 +      },
        {
                /* sentinel */
        }
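
Moving addr_gen_mode into ipv6_devconf adds the addr_gen_mode sysctl registered just above, so the mode becomes settable per device, and as a default for new devices, without netlink. A hedged sketch of driving it from C; the numeric modes follow enum in6_addr_gen_mode in the uapi headers (2 stable-privacy, 3 random), and eth0 is just an example device:

	#include <stdio.h>

	/* Requires CAP_NET_ADMIN; returns 0 on success. */
	static int set_addr_gen_mode(const char *dev, int mode)
	{
		char path[128];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/proc/sys/net/ipv6/conf/%s/addr_gen_mode", dev);
		f = fopen(path, "w");
		if (!f)
			return -1;
		fprintf(f, "%d\n", mode);
		return fclose(f);
	}

	int main(void)
	{
		return set_addr_gen_mode("eth0", 3) ? 1 : 0;
	}
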
diff --combined net/ipv6/ip6_gre.c
index 51b9835b3176ac2d05639fd318af89788655dd69,630b73be599977599c0021849fc6eb689cfefad7..6fcb7cb49bb20dcfe518177177e3e0ac1c7d1091
@@@ -367,35 -367,37 +367,37 @@@ static void ip6gre_tunnel_uninit(struc
  
  
  static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
-               u8 type, u8 code, int offset, __be32 info)
+                      u8 type, u8 code, int offset, __be32 info)
  {
-       const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
-       __be16 *p = (__be16 *)(skb->data + offset);
-       int grehlen = offset + 4;
+       const struct gre_base_hdr *greh;
+       const struct ipv6hdr *ipv6h;
+       int grehlen = sizeof(*greh);
        struct ip6_tnl *t;
+       int key_off = 0;
        __be16 flags;
+       __be32 key;
  
-       flags = p[0];
-       if (flags&(GRE_CSUM|GRE_KEY|GRE_SEQ|GRE_ROUTING|GRE_VERSION)) {
-               if (flags&(GRE_VERSION|GRE_ROUTING))
-                       return;
-               if (flags&GRE_KEY) {
-                       grehlen += 4;
-                       if (flags&GRE_CSUM)
-                               grehlen += 4;
-               }
+       if (!pskb_may_pull(skb, offset + grehlen))
+               return;
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       flags = greh->flags;
+       if (flags & (GRE_VERSION | GRE_ROUTING))
+               return;
+       if (flags & GRE_CSUM)
+               grehlen += 4;
+       if (flags & GRE_KEY) {
+               key_off = grehlen + offset;
+               grehlen += 4;
        }
  
-       /* If only 8 bytes returned, keyed message will be dropped here */
-       if (!pskb_may_pull(skb, grehlen))
+       if (!pskb_may_pull(skb, offset + grehlen))
                return;
        ipv6h = (const struct ipv6hdr *)skb->data;
-       p = (__be16 *)(skb->data + offset);
+       greh = (const struct gre_base_hdr *)(skb->data + offset);
+       key = key_off ? *(__be32 *)(skb->data + key_off) : 0;
  
        t = ip6gre_tunnel_lookup(skb->dev, &ipv6h->daddr, &ipv6h->saddr,
-                               flags & GRE_KEY ?
-                               *(((__be32 *)p) + (grehlen / 4) - 1) : 0,
-                               p[1]);
+                                key, greh->protocol);
        if (!t)
                return;
  
@@@ -484,6 -486,11 +486,6 @@@ drop
        return 0;
  }
  
 -struct ipv6_tel_txoption {
 -      struct ipv6_txoptions ops;
 -      __u8 dst_opt[8];
 -};
 -
  static int gre_handle_offloads(struct sk_buff *skb, bool csum)
  {
        return iptunnel_handle_offloads(skb,
@@@ -996,9 -1003,6 +998,9 @@@ static void ip6gre_tunnel_setup(struct 
        dev->flags |= IFF_NOARP;
        dev->addr_len = sizeof(struct in6_addr);
        netif_keep_dst(dev);
 +      /* This perm addr will be used as interface identifier by IPv6 */
 +      dev->addr_assign_type = NET_ADDR_RANDOM;
 +      eth_random_addr(dev->perm_addr);
  }
  
  static int ip6gre_tunnel_init_common(struct net_device *dev)
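
ip6gre_err() above stops trusting a fixed offset into the ICMP payload: it pskb_may_pull()s the base header first, pulls again after accounting for the optional checksum and key words, and reads the key from a computed offset. The same walk in plain, hedged userspace C (parse_gre() is illustrative; the flag values are the usual GRE bits from uapi/linux/if_tunnel.h):

	#include <arpa/inet.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	/* Host-order GRE flag bits. */
	#define GREF_CSUM     0x8000u
	#define GREF_ROUTING  0x4000u
	#define GREF_KEY      0x2000u
	#define GREF_VERSION  0x0007u

	/* Re-check the available length before every access, and take the
	 * optional fields in on-wire order: checksum word, then key. */
	static int parse_gre(const uint8_t *buf, size_t len,
			     size_t *hlen, uint32_t *key_be)
	{
		uint16_t flags;
		size_t off = 4;           /* base header: 16-bit flags + protocol */

		*key_be = 0;
		if (len < off)
			return -1;
		memcpy(&flags, buf, 2);
		flags = ntohs(flags);
		if (flags & (GREF_ROUTING | GREF_VERSION))
			return -1;        /* routing or non-zero version: give up */
		if (flags & GREF_CSUM)
			off += 4;         /* checksum + reserved word */
		if (flags & GREF_KEY) {
			if (len < off + 4)
				return -1; /* short payload: keyed message dropped */
			memcpy(key_be, buf + off, 4); /* key kept big-endian */
			off += 4;
		}
		if (len < off)
			return -1;
		*hlen = off;
		return 0;
	}
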
diff --combined net/ipv6/seg6_hmac.c
index b274f1d95e037c93e8e0e6531cfc51709f71297c,6ef3dfb6e811642f1fc9b680e0b255a9399bb024..f950cb53d5e3c9b460bff5f72f108af4f1b2d29a
@@@ -45,7 -45,7 +45,7 @@@
  #include <net/seg6_hmac.h>
  #include <linux/random.h>
  
 -static char * __percpu *hmac_ring;
 +static DEFINE_PER_CPU(char [SEG6_HMAC_RING_SIZE], hmac_ring);
  
  static int seg6_hmac_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
  {
@@@ -174,7 -174,7 +174,7 @@@ int seg6_hmac_compute(struct seg6_hmac_
         * hash function (RadioGatun) with up to 1216 bits
         */
  
-       /* saddr(16) + first_seg(1) + cleanup(1) + keyid(4) + seglist(16n) */
+       /* saddr(16) + first_seg(1) + flags(1) + keyid(4) + seglist(16n) */
        plen = 16 + 1 + 1 + 4 + (hdr->first_segment + 1) * 16;
  
        /* this limit allows for 14 segments */
         *
         * 1. Source IPv6 address (128 bits)
         * 2. first_segment value (8 bits)
-        * 3. cleanup flag (8 bits: highest bit is cleanup value, others are 0)
+        * 3. Flags (8 bits)
         * 4. HMAC Key ID (32 bits)
         * 5. All segments in the segments list (n * 128 bits)
         */
  
        local_bh_disable();
 -      ring = *this_cpu_ptr(hmac_ring);
 +      ring = this_cpu_ptr(hmac_ring);
        off = ring;
  
        /* source address */
        /* first_segment value */
        *off++ = hdr->first_segment;
  
-       /* cleanup flag */
-       *off++ = !!(sr_has_cleanup(hdr)) << 7;
+       /* flags */
+       *off++ = hdr->flags;
  
        /* HMAC Key ID */
        memcpy(off, &hmackeyid, 4);
@@@ -353,6 -353,27 +353,6 @@@ out
  }
  EXPORT_SYMBOL(seg6_push_hmac);
  
 -static int seg6_hmac_init_ring(void)
 -{
 -      int i;
 -
 -      hmac_ring = alloc_percpu(char *);
 -
 -      if (!hmac_ring)
 -              return -ENOMEM;
 -
 -      for_each_possible_cpu(i) {
 -              char *ring = kzalloc(SEG6_HMAC_RING_SIZE, GFP_KERNEL);
 -
 -              if (!ring)
 -                      return -ENOMEM;
 -
 -              *per_cpu_ptr(hmac_ring, i) = ring;
 -      }
 -
 -      return 0;
 -}
 -
  static int seg6_hmac_init_algo(void)
  {
        struct seg6_hmac_algo *algo;
                        return -ENOMEM;
  
                for_each_possible_cpu(cpu) {
 -                      shash = kzalloc(shsize, GFP_KERNEL);
 +                      shash = kzalloc_node(shsize, GFP_KERNEL,
 +                                           cpu_to_node(cpu));
                        if (!shash)
                                return -ENOMEM;
                        *per_cpu_ptr(algo->shashs, cpu) = shash;
  
  int __init seg6_hmac_init(void)
  {
 -      int ret;
 -
 -      ret = seg6_hmac_init_ring();
 -      if (ret < 0)
 -              goto out;
 -
 -      ret = seg6_hmac_init_algo();
 -
 -out:
 -      return ret;
 +      return seg6_hmac_init_algo();
  }
  EXPORT_SYMBOL(seg6_hmac_init);
  
@@@ -421,6 -450,13 +421,6 @@@ void seg6_hmac_exit(void
        struct seg6_hmac_algo *algo = NULL;
        int i, alg_count, cpu;
  
 -      for_each_possible_cpu(i) {
 -              char *ring = *per_cpu_ptr(hmac_ring, i);
 -
 -              kfree(ring);
 -      }
 -      free_percpu(hmac_ring);
 -
        alg_count = sizeof(hmac_algos) / sizeof(struct seg6_hmac_algo);
        for (i = 0; i < alg_count; i++) {
                algo = &hmac_algos[i];
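
The seg6_hmac change swaps the dynamically allocated percpu pointer ring for a static DEFINE_PER_CPU buffer, which deletes seg6_hmac_init_ring() and the exit-path frees along with their partial-failure leaks. A condensed kernel-idiom sketch of the before/after access pattern (build_hmac_text() is illustrative):

	/* Before: a percpu array of pointers, each slot kzalloc'd at init
	 * (and leak-prone on partial failure):
	 *	static char * __percpu *hmac_ring;
	 *	ring = *this_cpu_ptr(hmac_ring);
	 *
	 * After: one static per-CPU buffer, no init/exit code at all. */
	static DEFINE_PER_CPU(char [SEG6_HMAC_RING_SIZE], hmac_ring);

	static void build_hmac_text(void)
	{
		char *ring;

		local_bh_disable();     /* stay on this CPU while the ring is in use */
		ring = this_cpu_ptr(hmac_ring);
		/* ... assemble saddr/first_segment/flags/keyid/segments into ring ... */
		local_bh_enable();
	}
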
diff --combined net/ipv6/tcp_ipv6.c
index 6b9fc63fd4d23f17a6716b3557098e09f4f1d439,eaad72c3d7462b4af09d632fe88466148964e679..b5d27212db2fe6d2147719c18850263cd9ecb611
@@@ -123,7 -123,6 +123,7 @@@ static int tcp_v6_connect(struct sock *
        struct dst_entry *dst;
        int addr_type;
        int err;
 +      struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
  
        if (addr_len < SIN6_LEN_RFC2133)
                return -EINVAL;
        sk->sk_gso_type = SKB_GSO_TCPV6;
        ip6_dst_store(sk, dst, NULL, NULL);
  
 -      if (tcp_death_row.sysctl_tw_recycle &&
 +      if (tcp_death_row->sysctl_tw_recycle &&
            !tp->rx_opt.ts_recent_stamp &&
            ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
                tcp_fetch_timewait_stamp(sk, dst);
        inet->inet_dport = usin->sin6_port;
  
        tcp_set_state(sk, TCP_SYN_SENT);
 -      err = inet6_hash_connect(&tcp_death_row, sk);
 +      err = inet6_hash_connect(tcp_death_row, sk);
        if (err)
                goto late_failure;
  
                                                             inet->inet_dport,
                                                             &tp->tsoffset);
  
 +      if (tcp_fastopen_defer_connect(sk, &err))
 +              return err;
 +      if (err)
 +              goto late_failure;
 +
        err = tcp_connect(sk);
        if (err)
                goto late_failure;
  
  late_failure:
        tcp_set_state(sk, TCP_CLOSE);
 -      __sk_dst_reset(sk);
  failure:
        inet->inet_dport = 0;
        sk->sk_route_caps = 0;
@@@ -996,6 -991,16 +996,16 @@@ drop
        return 0; /* don't send reset */
  }
  
+ static void tcp_v6_restore_cb(struct sk_buff *skb)
+ {
+       /* We need to move header back to the beginning if xfrm6_policy_check()
+        * and tcp_v6_fill_cb() are going to be called again.
+        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+        */
+       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+               sizeof(struct inet6_skb_parm));
+ }
+ 
  static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                         struct request_sock *req,
                                         struct dst_entry *dst,
        tcp_ca_openreq_child(newsk, dst);
  
        tcp_sync_mss(newsk, dst_mtu(dst));
 -      newtp->advmss = dst_metric_advmss(dst);
 -      if (tcp_sk(sk)->rx_opt.user_mss &&
 -          tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
 -              newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
 +      newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
  
        tcp_initialize_rcv_mss(newsk);
  
                                                      sk_gfp_mask(sk, GFP_ATOMIC));
                        consume_skb(ireq->pktopts);
                        ireq->pktopts = NULL;
-                       if (newnp->pktoptions)
+                       if (newnp->pktoptions) {
+                               tcp_v6_restore_cb(newnp->pktoptions);
                                skb_set_owner_r(newnp->pktoptions, newsk);
+                       }
                }
        }
  
@@@ -1200,16 -1210,6 +1212,6 @@@ out
        return NULL;
  }
  
- static void tcp_v6_restore_cb(struct sk_buff *skb)
- {
-       /* We need to move header back to the beginning if xfrm6_policy_check()
-        * and tcp_v6_fill_cb() are going to be called again.
-        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
-        */
-       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-               sizeof(struct inet6_skb_parm));
- }
  /* The socket must have its spinlock held when we get
   * here, unless it is a TCP_LISTEN socket.
   *
@@@ -1622,6 -1622,7 +1624,6 @@@ static const struct inet_connection_soc
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
 -      .bind_conflict     = inet6_csk_bind_conflict,
  #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
@@@ -1652,6 -1653,7 +1654,6 @@@ static const struct inet_connection_soc
        .getsockopt        = ipv6_getsockopt,
        .addr2sockaddr     = inet6_csk_addr2sockaddr,
        .sockaddr_len      = sizeof(struct sockaddr_in6),
 -      .bind_conflict     = inet6_csk_bind_conflict,
  #ifdef CONFIG_COMPAT
        .compat_setsockopt = compat_ipv6_setsockopt,
        .compat_getsockopt = compat_ipv6_getsockopt,
@@@ -1744,7 -1746,7 +1746,7 @@@ static void get_tcp6_sock(struct seq_fi
        srcp  = ntohs(inet->inet_sport);
  
        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
 -          icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
 +          icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active    = 1;
                timer_expires   = icsk->icsk_timeout;
@@@ -1888,7 -1890,6 +1890,7 @@@ struct proto tcpv6_prot = 
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
 +      .keepalive              = tcp_set_keepalive,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
@@@ -1949,7 -1950,7 +1951,7 @@@ static void __net_exit tcpv6_net_exit(s
  
  static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
  {
 -      inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6);
 +      inet_twsk_purge(&tcp_hashinfo, AF_INET6);
  }
  
  static struct pernet_operations tcpv6_net_ops = {
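
A theme runs through the tcp_ipv6.c hunks: tcp_death_row stops being a single global and becomes a field of each network namespace, so tcp_v6_connect() resolves it through sock_net(sk) and tcpv6_net_exit_batch() no longer passes it to inet_twsk_purge(). The access pattern, as the diff itself shows it (a one-line sketch, with the helper name being illustrative):

#include <net/sock.h>

/* Per-netns lookup implied by the hunks above: each namespace owns its
 * own time-wait death row, so tw_recycle and related knobs are scoped
 * per netns rather than machine-wide.
 */
static struct inet_timewait_death_row *tcp_death_row_of(const struct sock *sk)
{
	return &sock_net(sk)->ipv4.tcp_death_row;
}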
diff --combined net/ipv6/udp.c
index 51346fa70298a0864e3997a6c94845a5707fa305,8990856f5101eaabaf14d4017df522f37845083b..df71ba05f41d646d461d2b67c5ad2909ac2a74c7
  #include <trace/events/skb.h>
  #include "udp_impl.h"
  
 +static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
 +{
 +#if defined(CONFIG_NET_L3_MASTER_DEV)
 +      if (!net->ipv4.sysctl_udp_l3mdev_accept &&
 +          skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
 +              return true;
 +#endif
 +      return false;
 +}
 +
  static u32 udp6_ehashfn(const struct net *net,
                        const struct in6_addr *laddr,
                        const u16 lport,
@@@ -113,7 -103,7 +113,7 @@@ int udp_v6_get_port(struct sock *sk, un
  
        /* precompute partial secondary hash */
        udp_sk(sk)->udp_portaddr_hash = hash2_partial;
 -      return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
 +      return udp_lib_get_port(sk, snum, hash2_nulladdr);
  }
  
  static void udp_v6_rehash(struct sock *sk)
  static int compute_score(struct sock *sk, struct net *net,
                         const struct in6_addr *saddr, __be16 sport,
                         const struct in6_addr *daddr, unsigned short hnum,
 -                       int dif)
 +                       int dif, bool exact_dif)
  {
        int score;
        struct inet_sock *inet;
                score++;
        }
  
 -      if (sk->sk_bound_dev_if) {
 +      if (sk->sk_bound_dev_if || exact_dif) {
                if (sk->sk_bound_dev_if != dif)
                        return -1;
                score++;
  static struct sock *udp6_lib_lookup2(struct net *net,
                const struct in6_addr *saddr, __be16 sport,
                const struct in6_addr *daddr, unsigned int hnum, int dif,
 -              struct udp_hslot *hslot2,
 +              bool exact_dif, struct udp_hslot *hslot2,
                struct sk_buff *skb)
  {
        struct sock *sk, *result;
        badness = -1;
        udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
                score = compute_score(sk, net, saddr, sport,
 -                                    daddr, hnum, dif);
 +                                    daddr, hnum, dif, exact_dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@@ -222,7 -212,6 +222,7 @@@ struct sock *__udp6_lib_lookup(struct n
        unsigned short hnum = ntohs(dport);
        unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
        struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
 +      bool exact_dif = udp6_lib_exact_dif_match(net, skb);
        int score, badness, matches = 0, reuseport = 0;
        u32 hash = 0;
  
                        goto begin;
  
                result = udp6_lib_lookup2(net, saddr, sport,
 -                                        daddr, hnum, dif,
 +                                        daddr, hnum, dif, exact_dif,
                                          hslot2, skb);
                if (!result) {
                        unsigned int old_slot2 = slot2;
  
                        result = udp6_lib_lookup2(net, saddr, sport,
                                                  daddr, hnum, dif,
 -                                                hslot2, skb);
 +                                                exact_dif, hslot2,
 +                                                skb);
                }
                return result;
        }
@@@ -259,8 -247,7 +259,8 @@@ begin
        result = NULL;
        badness = -1;
        sk_for_each_rcu(sk, &hslot->head) {
 -              score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
 +              score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
 +                                    exact_dif);
                if (score > badness) {
                        reuseport = sk->sk_reuseport;
                        if (reuseport) {
@@@ -454,7 -441,7 +454,7 @@@ try_again
        return err;
  
  csum_copy_err:
-       if (!__sk_queue_drop_skb(sk, skb, flags)) {
+       if (!__sk_queue_drop_skb(sk, skb, flags, udp_skb_destructor)) {
                if (is_udp4) {
                        UDP_INC_STATS(sock_net(sk),
                                      UDP_MIB_CSUMERRORS, is_udplite);
@@@ -1308,8 -1295,7 +1308,8 @@@ out
        return err;
  
  do_confirm:
 -      dst_confirm(dst);
 +      if (msg->msg_flags & MSG_PROBE)
 +              dst_confirm_neigh(dst, &fl6.daddr);
        if (!(msg->msg_flags&MSG_PROBE) || len)
                goto back_from_confirm;
        err = 0;
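
The udp.c hunks thread a new exact_dif flag through the socket-scoring path: udp6_lib_exact_dif_match() turns it on when the namespace disallows cross-VRF UDP delivery (sysctl_udp_l3mdev_accept off) and the skb arrived through an L3 master device. An illustrative reduction of the device-match rule added to compute_score() (not the kernel function, just the dif rule in isolation):

#include <stdbool.h>

/* With exact_dif set, even a socket bound to no device
 * (sk_bound_dev_if == 0) must match the incoming ifindex, which excludes
 * it; a true device match still earns a scoring point.
 */
static int score_dev(int sk_bound_dev_if, int dif, bool exact_dif)
{
	if (sk_bound_dev_if || exact_dif) {
		if (sk_bound_dev_if != dif)
			return -1;	/* exclude candidate socket */
		return 1;		/* device match: +1 to score */
	}
	return 0;			/* wildcard socket: no constraint */
}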
diff --combined net/mac80211/mesh.c
index 9c23172feba07abd63d7fdf2ab1530cbca28b8b9,50e1b7f78bd49605d2dbca4c215befecc1d8d001..c28b0af9c1f21735915433aecd632165a4a82580
@@@ -279,6 -279,10 +279,6 @@@ int mesh_add_meshconf_ie(struct ieee802
        /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
        *pos |= ifmsh->ps_peers_deep_sleep ?
                        IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
 -      *pos++ |= ifmsh->adjusting_tbtt ?
 -                      IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
 -      *pos++ = 0x00;
 -
        return 0;
  }
  
@@@ -335,7 -339,7 +335,7 @@@ int mesh_add_vendor_ies(struct ieee8021
        /* fast-forward to vendor IEs */
        offset = ieee80211_ie_split_vendor(ifmsh->ie, ifmsh->ie_len, 0);
  
-       if (offset) {
+       if (offset < ifmsh->ie_len) {
                len = ifmsh->ie_len - offset;
                data = ifmsh->ie + offset;
                if (skb_tailroom(skb) < len)
@@@ -846,6 -850,7 +846,6 @@@ int ieee80211_start_mesh(struct ieee802
        ifmsh->mesh_cc_id = 0;  /* Disabled */
        /* register sync ops from extensible synchronization framework */
        ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
 -      ifmsh->adjusting_tbtt = false;
        ifmsh->sync_offset_clockdrift_max = 0;
        set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
        ieee80211_mesh_root_setup(ifmsh);
@@@ -1344,7 -1349,7 +1344,7 @@@ void ieee80211_mesh_work(struct ieee802
                ieee80211_mesh_rootpath(sdata);
  
        if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags))
 -              mesh_sync_adjust_tbtt(sdata);
 +              mesh_sync_adjust_tsf(sdata);
  
        if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags))
                mesh_bss_info_changed(sdata);
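
Two independent mesh.c fixes sit in these hunks: the meshconf capability byte no longer advertises TBTT adjustment (the adjusting_tbtt machinery is gone, and mesh_sync_adjust_tbtt() is renamed to mesh_sync_adjust_tsf()), and mesh_add_vendor_ies() gets a correct emptiness test. For the latter, a sketch of the boundary behaviour, assuming the usual contract that the split helper returns the offset where vendor IEs start, or the full length when there are none:

#include <stddef.h>

/* Hypothetical stand-in for ieee80211_ie_split_vendor(). */
size_t split_vendor(const unsigned char *ie, size_t ie_len);

/* The old `if (offset)` test misfired at both edges: offset == 0 (the
 * IE buffer starts with vendor IEs) skipped the copy entirely, while
 * offset == ie_len (no vendor IEs) attempted it with len == 0.
 * `offset < ie_len` asks the right question: is anything left to append?
 */
static size_t vendor_ies_len(const unsigned char *ie, size_t ie_len)
{
	size_t offset = split_vendor(ie, ie_len);

	return offset < ie_len ? ie_len - offset : 0;
}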
diff --combined net/sctp/socket.c
index a4609a0be76dd46ec7d1d9eabe5d72b8c1b841b3,1b5d669e30292a57ed57dd920d81be2a57f97b22..a8b4252fe0842882fa4a067a25204689cd1b12cd
@@@ -239,7 -239,7 +239,7 @@@ static struct sctp_transport *sctp_addr
        union sctp_addr *laddr = (union sctp_addr *)addr;
        struct sctp_transport *transport;
  
-       if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+       if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
                return NULL;
  
        addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
@@@ -364,7 -364,7 +364,7 @@@ static int sctp_do_bind(struct sock *sk
                }
        }
  
 -      if (snum && snum < PROT_SOCK &&
 +      if (snum && snum < inet_prot_sock(net) &&
            !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
                return -EACCES;
  
@@@ -592,7 -592,7 +592,7 @@@ static int sctp_send_asconf_add_ip(stru
                        list_for_each_entry(trans,
                            &asoc->peer.transport_addr_list, transports) {
                                /* Clear the source and route cache */
 -                              dst_release(trans->dst);
 +                              sctp_transport_dst_release(trans);
                                trans->cwnd = min(4*asoc->pathmtu, max_t(__u32,
                                    2*asoc->pathmtu, 4380));
                                trans->ssthresh = asoc->peer.i.a_rwnd;
@@@ -843,7 -843,7 +843,7 @@@ skip_mkasconf
                 */
                list_for_each_entry(transport, &asoc->peer.transport_addr_list,
                                        transports) {
 -                      dst_release(transport->dst);
 +                      sctp_transport_dst_release(transport);
                        sctp_transport_route(transport, NULL,
                                             sctp_sk(asoc->base.sk));
                }
@@@ -1156,10 -1156,8 +1156,10 @@@ static int __sctp_connect(struct sock *
                                 * accept new associations, but it SHOULD NOT
                                 * be permitted to open new associations.
                                 */
 -                              if (ep->base.bind_addr.port < PROT_SOCK &&
 -                                  !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
 +                              if (ep->base.bind_addr.port <
 +                                  inet_prot_sock(net) &&
 +                                  !ns_capable(net->user_ns,
 +                                  CAP_NET_BIND_SERVICE)) {
                                        err = -EACCES;
                                        goto out_free;
                                }
@@@ -1824,7 -1822,7 +1824,7 @@@ static int sctp_sendmsg(struct sock *sk
                         * but it SHOULD NOT be permitted to open new
                         * associations.
                         */
 -                      if (ep->base.bind_addr.port < PROT_SOCK &&
 +                      if (ep->base.bind_addr.port < inet_prot_sock(net) &&
                            !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
                                err = -EACCES;
                                goto out_unlock;
@@@ -2436,6 -2434,7 +2436,6 @@@ static int sctp_apply_peer_addr_params(
                        sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
                } else if (asoc) {
                        asoc->pathmtu = params->spp_pathmtu;
 -                      sctp_frag_point(asoc, params->spp_pathmtu);
                } else {
                        sp->pathmtu = params->spp_pathmtu;
                }
        return retval;
  }
  
 +static int sctp_setsockopt_enable_strreset(struct sock *sk,
 +                                         char __user *optval,
 +                                         unsigned int optlen)
 +{
 +      struct sctp_assoc_value params;
 +      struct sctp_association *asoc;
 +      int retval = -EINVAL;
 +
 +      if (optlen != sizeof(params))
 +              goto out;
 +
 +      if (copy_from_user(&params, optval, optlen)) {
 +              retval = -EFAULT;
 +              goto out;
 +      }
 +
 +      if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
 +              goto out;
 +
 +      asoc = sctp_id2assoc(sk, params.assoc_id);
 +      if (asoc) {
 +              asoc->strreset_enable = params.assoc_value;
 +      } else if (!params.assoc_id) {
 +              struct sctp_sock *sp = sctp_sk(sk);
 +
 +              sp->ep->strreset_enable = params.assoc_value;
 +      } else {
 +              goto out;
 +      }
 +
 +      retval = 0;
 +
 +out:
 +      return retval;
 +}
 +
 +static int sctp_setsockopt_reset_streams(struct sock *sk,
 +                                       char __user *optval,
 +                                       unsigned int optlen)
 +{
 +      struct sctp_reset_streams *params;
 +      struct sctp_association *asoc;
 +      int retval = -EINVAL;
 +
 +      if (optlen < sizeof(struct sctp_reset_streams))
 +              return -EINVAL;
 +
 +      params = memdup_user(optval, optlen);
 +      if (IS_ERR(params))
 +              return PTR_ERR(params);
 +
 +      asoc = sctp_id2assoc(sk, params->srs_assoc_id);
 +      if (!asoc)
 +              goto out;
 +
 +      retval = sctp_send_reset_streams(asoc, params);
 +
 +out:
 +      kfree(params);
 +      return retval;
 +}
 +
  /* API 6.2 setsockopt(), getsockopt()
   *
   * Applications use setsockopt() and getsockopt() to set or retrieve
@@@ -3984,12 -3921,6 +3984,12 @@@ static int sctp_setsockopt(struct sock 
        case SCTP_DEFAULT_PRINFO:
                retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
                break;
 +      case SCTP_ENABLE_STREAM_RESET:
 +              retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
 +              break;
 +      case SCTP_RESET_STREAMS:
 +              retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
 +              break;
        default:
                retval = -ENOPROTOOPT;
                break;
        return retval;
  }
  
 +static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
 +                                         char __user *optval,
 +                                         int __user *optlen)
 +{
 +      struct sctp_assoc_value params;
 +      struct sctp_association *asoc;
 +      int retval = -EFAULT;
 +
 +      if (len < sizeof(params)) {
 +              retval = -EINVAL;
 +              goto out;
 +      }
 +
 +      len = sizeof(params);
 +      if (copy_from_user(&params, optval, len))
 +              goto out;
 +
 +      asoc = sctp_id2assoc(sk, params.assoc_id);
 +      if (asoc) {
 +              params.assoc_value = asoc->strreset_enable;
 +      } else if (!params.assoc_id) {
 +              struct sctp_sock *sp = sctp_sk(sk);
 +
 +              params.assoc_value = sp->ep->strreset_enable;
 +      } else {
 +              retval = -EINVAL;
 +              goto out;
 +      }
 +
 +      if (put_user(len, optlen))
 +              goto out;
 +
 +      if (copy_to_user(optval, &params, len))
 +              goto out;
 +
 +      retval = 0;
 +
 +out:
 +      return retval;
 +}
 +
  static int sctp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
  {
                retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
                                                        optlen);
                break;
 +      case SCTP_ENABLE_STREAM_RESET:
 +              retval = sctp_getsockopt_enable_strreset(sk, len, optval,
 +                                                       optlen);
 +              break;
        default:
                retval = -ENOPROTOOPT;
                break;
@@@ -7540,7 -7426,8 +7540,8 @@@ static int sctp_wait_for_sndbuf(struct 
                 */
                release_sock(sk);
                current_timeo = schedule_timeout(current_timeo);
-               BUG_ON(sk != asoc->base.sk);
+               if (sk != asoc->base.sk)
+                       goto do_error;
                lock_sock(sk);
  
                *timeo_p = current_timeo;
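
On the sctp/socket.c side, besides the PROT_SOCK to inet_prot_sock(net) privileged-port conversions and the BUG_ON in sctp_wait_for_sndbuf() softened into a graceful error path, the new stream-reset socket options land. The variable-length SCTP_RESET_STREAMS handler follows the standard memdup_user() shape; a condensed sketch, with a hypothetical consumer standing in for sctp_send_reset_streams():

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

int do_reset(struct sock *sk, struct sctp_reset_streams *p); /* hypothetical */

static int set_varlen_opt(struct sock *sk, char __user *optval,
			  unsigned int optlen)
{
	struct sctp_reset_streams *params;
	int ret;

	if (optlen < sizeof(*params))	/* fixed header must be present */
		return -EINVAL;

	params = memdup_user(optval, optlen);	/* copy + allocate in one step */
	if (IS_ERR(params))
		return PTR_ERR(params);

	ret = do_reset(sk, params);

	kfree(params);		/* freed on every path past the copy */
	return ret;
}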
diff --combined net/wireless/nl80211.c
index 63dfa60a29ef940cc4a2a0ca92a98bd5757b8dbe,aee396b9f190bb4454844c7282fd4d3f5d85b5e7..3aee94b0c6c51657bb49b9fb878667a7b86ae616
@@@ -405,11 -405,6 +405,11 @@@ static const struct nla_policy nl80211_
        [NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN },
        [NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, },
        [NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
 +      [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
 +      [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
 +              .len = sizeof(struct nl80211_bss_select_rssi_adjust)
 +      },
 +      [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
  };
  
  /* policy for the key attributes */
@@@ -5921,6 -5916,7 +5921,7 @@@ do {                                                                        
                        break;
                }
                cfg->ht_opmode = ht_opmode;
+               mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
        }
        FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathToRootTimeout,
                                  1, 65535, mask,
@@@ -6795,10 -6791,13 +6796,10 @@@ nl80211_parse_sched_scan_plans(struct w
  
                /*
                 * If scan plans are not specified,
 -               * %NL80211_ATTR_SCHED_SCAN_INTERVAL must be specified. In this
 +               * %NL80211_ATTR_SCHED_SCAN_INTERVAL will be specified. In this
                 * case one scan plan will be set with the specified scan
                 * interval and infinite number of iterations.
                 */
 -              if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
 -                      return -EINVAL;
 -
                interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
                if (!interval)
                        return -EINVAL;
@@@ -6970,12 -6969,6 +6971,12 @@@ nl80211_parse_sched_scan(struct wiphy *
        if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
                return ERR_PTR(-EINVAL);
  
 +      if (!wiphy_ext_feature_isset(
 +                  wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) &&
 +          (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] ||
 +           attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]))
 +              return ERR_PTR(-EINVAL);
 +
        request = kzalloc(sizeof(*request)
                        + sizeof(*request->ssids) * n_ssids
                        + sizeof(*request->match_sets) * n_match_sets
                request->delay =
                        nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
  
 +      if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) {
 +              request->relative_rssi = nla_get_s8(
 +                      attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]);
 +              request->relative_rssi_set = true;
 +      }
 +
 +      if (request->relative_rssi_set &&
 +          attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) {
 +              struct nl80211_bss_select_rssi_adjust *rssi_adjust;
 +
 +              rssi_adjust = nla_data(
 +                      attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]);
 +              request->rssi_adjust.band = rssi_adjust->band;
 +              request->rssi_adjust.delta = rssi_adjust->delta;
 +              if (!is_band_valid(wiphy, request->rssi_adjust.band)) {
 +                      err = -EINVAL;
 +                      goto out_free;
 +              }
 +      }
 +
        err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
        if (err)
                goto out_free;
@@@ -8096,17 -8069,8 +8097,17 @@@ static int nl80211_associate(struct sk_
        err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
        if (!err) {
                wdev_lock(dev->ieee80211_ptr);
 +
                err = cfg80211_mlme_assoc(rdev, dev, chan, bssid,
                                          ssid, ssid_len, &req);
 +
 +              if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
 +                      dev->ieee80211_ptr->conn_owner_nlportid =
 +                              info->snd_portid;
 +                      memcpy(dev->ieee80211_ptr->disconnect_bssid,
 +                             bssid, ETH_ALEN);
 +              }
 +
                wdev_unlock(dev->ieee80211_ptr);
        }
  
@@@ -8825,24 -8789,11 +8826,24 @@@ static int nl80211_connect(struct sk_bu
        }
  
        wdev_lock(dev->ieee80211_ptr);
 +
        err = cfg80211_connect(rdev, dev, &connect, connkeys,
                               connect.prev_bssid);
 -      wdev_unlock(dev->ieee80211_ptr);
        if (err)
                kzfree(connkeys);
 +
 +      if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
 +              dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid;
 +              if (connect.bssid)
 +                      memcpy(dev->ieee80211_ptr->disconnect_bssid,
 +                             connect.bssid, ETH_ALEN);
 +              else
 +                      memset(dev->ieee80211_ptr->disconnect_bssid,
 +                             0, ETH_ALEN);
 +      }
 +
 +      wdev_unlock(dev->ieee80211_ptr);
 +
        return err;
  }
  
@@@ -9738,20 -9689,6 +9739,20 @@@ static int nl80211_send_wowlan_nd(struc
        if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
                return -ENOBUFS;
  
 +      if (req->relative_rssi_set) {
 +              struct nl80211_bss_select_rssi_adjust rssi_adjust;
 +
 +              if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
 +                             req->relative_rssi))
 +                      return -ENOBUFS;
 +
 +              rssi_adjust.band = req->rssi_adjust.band;
 +              rssi_adjust.delta = req->rssi_adjust.delta;
 +              if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
 +                          sizeof(rssi_adjust), &rssi_adjust))
 +                      return -ENOBUFS;
 +      }
 +
        freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
        if (!freqs)
                return -ENOBUFS;
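
The sched-scan relative-RSSI plumbing spans three places in nl80211.c: the nla_policy entries that length-check the new attributes, the parser that copies them into the request (rejecting them up front unless the wiphy advertises NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI), and the wowlan netdetect dump above that emits them back out. Because the policy pins the blob's length to sizeof(struct nl80211_bss_select_rssi_adjust), the parse side can consume nla_data() directly. A hypothetical helper showing just that step (the real logic is inline in nl80211_parse_sched_scan()):

/* Illustrative only: the policy has already validated the attribute
 * length, so nla_data() is safe to dereference as the struct.
 */
static void copy_rssi_adjust(struct nlattr *attr,
			     struct cfg80211_sched_scan_request *request)
{
	struct nl80211_bss_select_rssi_adjust *adj;

	if (!attr)
		return;

	adj = nla_data(attr);
	request->rssi_adjust.band = adj->band;
	request->rssi_adjust.delta = adj->delta;
}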
@@@ -11886,6 -11823,9 +11887,6 @@@ static int nl80211_set_multicast_to_uni
        const struct nlattr *nla;
        bool enabled;
  
 -      if (netif_running(dev))
 -              return -EBUSY;
 -
        if (!rdev->ops->set_multicast_to_unicast)
                return -EOPNOTSUPP;
  
@@@ -12886,7 -12826,7 +12887,7 @@@ static int nl80211_add_scan_req(struct 
        return -ENOBUFS;
  }
  
 -static int nl80211_send_scan_msg(struct sk_buff *msg,
 +static int nl80211_prep_scan_msg(struct sk_buff *msg,
                                 struct cfg80211_registered_device *rdev,
                                 struct wireless_dev *wdev,
                                 u32 portid, u32 seq, int flags,
  }
  
  static int
 -nl80211_send_sched_scan_msg(struct sk_buff *msg,
 +nl80211_prep_sched_scan_msg(struct sk_buff *msg,
                            struct cfg80211_registered_device *rdev,
                            struct net_device *netdev,
                            u32 portid, u32 seq, int flags, u32 cmd)
@@@ -12949,7 -12889,7 +12950,7 @@@ void nl80211_send_scan_start(struct cfg
        if (!msg)
                return;
  
 -      if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
 +      if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0,
                                  NL80211_CMD_TRIGGER_SCAN) < 0) {
                nlmsg_free(msg);
                return;
@@@ -12968,7 -12908,7 +12969,7 @@@ struct sk_buff *nl80211_build_scan_msg(
        if (!msg)
                return NULL;
  
 -      if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
 +      if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0,
                                  aborted ? NL80211_CMD_SCAN_ABORTED :
                                            NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
                nlmsg_free(msg);
        return msg;
  }
  
 -void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
 -                            struct sk_buff *msg)
 -{
 -      if (!msg)
 -              return;
 -
 -      genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
 -                              NL80211_MCGRP_SCAN, GFP_KERNEL);
 -}
 -
 -void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
 -                                   struct net_device *netdev)
 +/* send message created by nl80211_build_scan_msg() */
 +void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev,
 +                         struct sk_buff *msg)
  {
 -      struct sk_buff *msg;
 -
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return;
  
 -      if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0,
 -                                      NL80211_CMD_SCHED_SCAN_RESULTS) < 0) {
 -              nlmsg_free(msg);
 -              return;
 -      }
 -
        genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
                                NL80211_MCGRP_SCAN, GFP_KERNEL);
  }
@@@ -12998,7 -12956,7 +12999,7 @@@ void nl80211_send_sched_scan(struct cfg
        if (!msg)
                return;
  
 -      if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
 +      if (nl80211_prep_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
                nlmsg_free(msg);
                return;
        }
@@@ -13100,7 -13058,7 +13101,7 @@@ static void nl80211_send_mlme_event(str
        struct sk_buff *msg;
        void *hdr;
  
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 +      msg = nlmsg_new(100 + len, gfp);
        if (!msg)
                return;
  
@@@ -13247,14 -13205,12 +13248,14 @@@ void nl80211_send_connect_result(struc
                                 struct net_device *netdev, const u8 *bssid,
                                 const u8 *req_ie, size_t req_ie_len,
                                 const u8 *resp_ie, size_t resp_ie_len,
 -                               int status, gfp_t gfp)
 +                               int status,
 +                               enum nl80211_timeout_reason timeout_reason,
 +                               gfp_t gfp)
  {
        struct sk_buff *msg;
        void *hdr;
  
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 +      msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
        if (!msg)
                return;
  
            nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
                        status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
                        status) ||
 -          (status < 0 && nla_put_flag(msg, NL80211_ATTR_TIMED_OUT)) ||
 +          (status < 0 &&
 +           (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
 +            nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, timeout_reason))) ||
            (req_ie &&
             nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
            (resp_ie &&
@@@ -13298,7 -13252,7 +13299,7 @@@ void nl80211_send_roamed(struct cfg8021
        struct sk_buff *msg;
        void *hdr;
  
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 +      msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
        if (!msg)
                return;
  
@@@ -13335,7 -13289,7 +13336,7 @@@ void nl80211_send_disconnected(struct c
        struct sk_buff *msg;
        void *hdr;
  
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 +      msg = nlmsg_new(100 + ie_len, GFP_KERNEL);
        if (!msg)
                return;
  
@@@ -13411,7 -13365,7 +13412,7 @@@ void cfg80211_notify_new_peer_candidate
  
        trace_cfg80211_notify_new_peer_candidate(dev, addr);
  
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 +      msg = nlmsg_new(100 + ie_len, gfp);
        if (!msg)
                return;
  
@@@ -13782,7 -13736,7 +13783,7 @@@ int nl80211_send_mgmt(struct cfg80211_r
        struct sk_buff *msg;
        void *hdr;
  
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 +      msg = nlmsg_new(100 + len, gfp);
        if (!msg)
                return -ENOMEM;
  
@@@ -13826,7 -13780,7 +13827,7 @@@ void cfg80211_mgmt_tx_status(struct wir
  
        trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
  
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
 +      msg = nlmsg_new(100 + len, gfp);
        if (!msg)
                return;
  
@@@ -14581,8 -14535,6 +14582,8 @@@ static int nl80211_netlink_notify(struc
  
                        if (wdev->owner_nlportid == notify->portid)
                                schedule_destroy_work = true;
 +                      else if (wdev->conn_owner_nlportid == notify->portid)
 +                              schedule_work(&wdev->disconnect_wk);
                }
  
                spin_lock_bh(&rdev->beacon_registrations_lock);
@@@ -14637,7 -14589,7 +14638,7 @@@ void cfg80211_ft_event(struct net_devic
        if (!ft_event->target_ap)
                return;
  
 -      msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
 +      msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
        if (!msg)
                return;
  