Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
author Linus Torvalds <[email protected]>
Thu, 6 Jul 2017 18:45:08 +0000 (11:45 -0700)
committer Linus Torvalds <[email protected]>
Thu, 6 Jul 2017 18:45:08 +0000 (11:45 -0700)
Pull rdma update from Doug Ledford:
 "This includes two bugs against the newly added opa vnic that were
  found by turning on the debug kernel options:

   - sleeping while holding a lock: a one-line fix that switches an
     allocation from GFP_KERNEL to GFP_ATOMIC

   - a case where an isolated caller could invoke their code in atomic
     context, so a mutex had to be switched to a spinlock to be safe;
     this is considerably more lines of diff because every use of that
     lock had to be converted (a sketch of both patterns follows this
     list)
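
  For illustration, a minimal sketch of both patterns (hedged: the
  struct and function names are hypothetical stand-ins, not the actual
  opa_vnic code). Code reachable from atomic context may not sleep, so
  it must take a spinlock rather than a mutex, and any allocation made
  while that spinlock is held must use GFP_ATOMIC:

        #include <linux/errno.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>

        /* Hypothetical stand-ins for the opa_vnic structures. */
        struct vnic_stats_ctx {
                spinlock_t lock;        /* was a mutex before the fix */
                u64 trap_count;
        };

        /* May be reached from atomic context, so it must not sleep. */
        static int vnic_send_trap(struct vnic_stats_ctx *ctx)
        {
                u8 *trap_buf;

                spin_lock(&ctx->lock);
                /* GFP_KERNEL can sleep and would trigger a debug-kernel
                 * (CONFIG_DEBUG_ATOMIC_SLEEP) splat here; GFP_ATOMIC
                 * never sleeps and is the safe choice under a spinlock.
                 */
                trap_buf = kzalloc(64, GFP_ATOMIC);
                if (!trap_buf) {
                        spin_unlock(&ctx->lock);
                        return -ENOMEM;
                }
                ctx->trap_count++;
                /* ... build and post the trap ... */
                kfree(trap_buf);
                spin_unlock(&ctx->lock);
                return 0;
        }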

  In addition, there is the fix for the bug already discussed with you:
  an out-of-bounds array access in ib_uverbs_modify_qp and
  ib_uverbs_create_ah. It is only seven lines of diff.
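
  The shape of that fix, reduced from the uverbs diff below (this
  fragment is illustrative context, not a standalone function): a port
  number arriving from userspace is range-checked with
  rdma_is_port_valid() before it can be used to index per-port state:

        /* cmd.attr.port_num comes straight from the user's write()
         * payload; an unchecked value could index past the end of the
         * device's per-port tables.
         */
        if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
                return -EINVAL;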

  And finally, one fix for an earlier fix in the -rc cycle that broke
  hfi1 and qib with regard to IPoIB. This one is, unfortunately, larger
  than I would like for a -rc7 submission, but fixing the problem
  required that we stop treating all devices as though they had
  universally allocated a netdev, because that isn't true. It took 70
  lines of diff to resolve the issue, but the final patch has been
  vetted by Intel and Mellanox, and both have approved the fix.
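
  The resulting pattern, reduced from the diffs below (illustrative
  fragments, not a complete driver): instead of a single device-wide
  free_rdma_netdev op on struct ib_device, each rdma_netdev records its
  own cleanup function, so a ULP such as IPoIB can tear down any netdev
  without guessing how it was allocated:

        /* Allocation side: whoever creates the netdev records how to
         * free it -- a driver routine such as mlx5_ib_free_rdma_netdev,
         * or plain free_netdev on the default path.
         */
        struct rdma_netdev *rn = netdev_priv(netdev);

        rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;

        /* Teardown side: the ULP simply invokes whatever was recorded. */
        unregister_netdev(netdev);
        rn->free_rdma_netdev(netdev);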

  Summary:

   - Two fixes for OPA found by debug kernel
   - Fix for user-supplied input causing kernel problems
   - Fix for the IPoIB fixes submitted around -rc4"

[ Doug sent this having not noticed the 4.12 release, so I guess I'll be
  getting another rdma pull request with the actual merge window
  updates and not just fixes.

  Oh well - it would have been nice if this small update had been the
  merge window one.     - Linus ]

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  IB/core, opa_vnic, hfi1, mlx5: Properly free rdma_netdev
  RDMA/uverbs: Check port number supplied by user verbs cmds
  IB/opa_vnic: Use spinlock instead of mutex for stats_lock
  IB/opa_vnic: Use GFP_ATOMIC while sending trap

drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/opa_vnic/opa_vnic_netdev.c
include/rdma/ib_verbs.h

index 0ad3b05405d8a1cd2e021745f98861a993abb119,e63f2a13c5e1311230a524600497f3fbb99b49d6..8ba9bfb073d17c3025206fe66995547433c6a9a8
@@@ -1508,10 -1508,6 +1508,10 @@@ static int create_qp(struct ib_uverbs_f
        }
  
        if (cmd->qp_type != IB_QPT_XRC_TGT) {
 +              ret = ib_create_qp_security(qp, device);
 +              if (ret)
 +                      goto err_cb;
 +
                qp->real_qp       = qp;
                qp->device        = device;
                qp->pd            = pd;
@@@ -1935,6 -1931,11 +1935,11 @@@ static int modify_qp(struct ib_uverbs_f
                goto out;
        }
  
+       if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) {
+               ret = -EINVAL;
+               goto release_qp;
+       }
        attr->qp_state            = cmd->base.qp_state;
        attr->cur_qp_state        = cmd->base.cur_qp_state;
        attr->path_mtu            = cmd->base.path_mtu;
                        if (ret)
                                goto release_qp;
                }
 -              ret = qp->device->modify_qp(qp, attr,
 +              ret = ib_security_modify_qp(qp,
 +                                          attr,
                                            modify_qp_mask(qp->qp_type,
                                                           cmd->base.attr_mask),
                                            udata);
        } else {
 -              ret = ib_modify_qp(qp, attr,
 -                                 modify_qp_mask(qp->qp_type,
 -                                                cmd->base.attr_mask));
 +              ret = ib_security_modify_qp(qp,
 +                                          attr,
 +                                          modify_qp_mask(qp->qp_type,
 +                                                         cmd->base.attr_mask),
 +                                          NULL);
        }
  
  release_qp:
@@@ -2548,6 -2546,9 +2553,9 @@@ ssize_t ib_uverbs_create_ah(struct ib_u
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
  
+       if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
+               return -EINVAL;
        INIT_UDATA(&udata, buf + sizeof(cmd),
                   (unsigned long)cmd.response + sizeof(resp),
                   in_len - sizeof(cmd), out_len - sizeof(resp));
index dc2f59e33971cdb98eb050dfa5e0089a5654205a,afa5f6e88e1df36f46741187be85d8803c70eb03..a7f2e60085c46c2300e3695029fbe1373cc0d480
@@@ -60,7 -60,8 +60,7 @@@
  #include "cmd.h"
  
  #define DRIVER_NAME "mlx5_ib"
 -#define DRIVER_VERSION "2.2-1"
 -#define DRIVER_RELDATE        "Feb 2014"
 +#define DRIVER_VERSION "5.0-0"
  
  MODULE_AUTHOR("Eli Cohen <[email protected]>");
  MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
@@@ -69,7 -70,7 +69,7 @@@ MODULE_VERSION(DRIVER_VERSION)
  
  static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
 -      DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
 +      DRIVER_VERSION "\n";
  
  enum {
        MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
@@@ -223,8 -224,8 +223,8 @@@ static int translate_eth_proto_oper(u3
        return 0;
  }
  
 -static void mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 -                               struct ib_port_attr *props)
 +static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
 +                              struct ib_port_attr *props)
  {
        struct mlx5_ib_dev *dev = to_mdev(device);
        struct mlx5_core_dev *mdev = dev->mdev;
        enum ib_mtu ndev_ib_mtu;
        u16 qkey_viol_cntr;
        u32 eth_prot_oper;
 +      int err;
  
        /* Possible bad flows are checked before filling out props so in case
         * of an error it will still be zeroed out.
         */
 -      if (mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num))
 -              return;
 +      err = mlx5_query_port_eth_proto_oper(mdev, &eth_prot_oper, port_num);
 +      if (err)
 +              return err;
  
        translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
                                 &props->active_width);
  
        ndev = mlx5_ib_get_netdev(device, port_num);
        if (!ndev)
 -              return;
 +              return 0;
  
        if (mlx5_lag_is_active(dev->mdev)) {
                rcu_read_lock();
        dev_put(ndev);
  
        props->active_mtu       = min(props->max_mtu, ndev_ib_mtu);
 +      return 0;
  }
  
 -static void ib_gid_to_mlx5_roce_addr(const union ib_gid *gid,
 -                                   const struct ib_gid_attr *attr,
 -                                   void *mlx5_addr)
 +static int set_roce_addr(struct mlx5_ib_dev *dev, u8 port_num,
 +                       unsigned int index, const union ib_gid *gid,
 +                       const struct ib_gid_attr *attr)
  {
 -#define MLX5_SET_RA(p, f, v) MLX5_SET(roce_addr_layout, p, f, v)
 -      char *mlx5_addr_l3_addr = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
 -                                             source_l3_address);
 -      void *mlx5_addr_mac     = MLX5_ADDR_OF(roce_addr_layout, mlx5_addr,
 -                                             source_mac_47_32);
 -
 -      if (!gid)
 -              return;
 +      enum ib_gid_type gid_type = IB_GID_TYPE_IB;
 +      u8 roce_version = 0;
 +      u8 roce_l3_type = 0;
 +      bool vlan = false;
 +      u8 mac[ETH_ALEN];
 +      u16 vlan_id = 0;
  
 -      ether_addr_copy(mlx5_addr_mac, attr->ndev->dev_addr);
 +      if (gid) {
 +              gid_type = attr->gid_type;
 +              ether_addr_copy(mac, attr->ndev->dev_addr);
  
 -      if (is_vlan_dev(attr->ndev)) {
 -              MLX5_SET_RA(mlx5_addr, vlan_valid, 1);
 -              MLX5_SET_RA(mlx5_addr, vlan_id, vlan_dev_vlan_id(attr->ndev));
 +              if (is_vlan_dev(attr->ndev)) {
 +                      vlan = true;
 +                      vlan_id = vlan_dev_vlan_id(attr->ndev);
 +              }
        }
  
 -      switch (attr->gid_type) {
 +      switch (gid_type) {
        case IB_GID_TYPE_IB:
 -              MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_1);
 +              roce_version = MLX5_ROCE_VERSION_1;
                break;
        case IB_GID_TYPE_ROCE_UDP_ENCAP:
 -              MLX5_SET_RA(mlx5_addr, roce_version, MLX5_ROCE_VERSION_2);
 +              roce_version = MLX5_ROCE_VERSION_2;
 +              if (ipv6_addr_v4mapped((void *)gid))
 +                      roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
 +              else
 +                      roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
                break;
  
        default:
 -              WARN_ON(true);
 +              mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
        }
  
 -      if (attr->gid_type != IB_GID_TYPE_IB) {
 -              if (ipv6_addr_v4mapped((void *)gid))
 -                      MLX5_SET_RA(mlx5_addr, roce_l3_type,
 -                                  MLX5_ROCE_L3_TYPE_IPV4);
 -              else
 -                      MLX5_SET_RA(mlx5_addr, roce_l3_type,
 -                                  MLX5_ROCE_L3_TYPE_IPV6);
 -      }
 -
 -      if ((attr->gid_type == IB_GID_TYPE_IB) ||
 -          !ipv6_addr_v4mapped((void *)gid))
 -              memcpy(mlx5_addr_l3_addr, gid, sizeof(*gid));
 -      else
 -              memcpy(&mlx5_addr_l3_addr[12], &gid->raw[12], 4);
 -}
 -
 -static int set_roce_addr(struct ib_device *device, u8 port_num,
 -                       unsigned int index,
 -                       const union ib_gid *gid,
 -                       const struct ib_gid_attr *attr)
 -{
 -      struct mlx5_ib_dev *dev = to_mdev(device);
 -      u32  in[MLX5_ST_SZ_DW(set_roce_address_in)]  = {0};
 -      u32 out[MLX5_ST_SZ_DW(set_roce_address_out)] = {0};
 -      void *in_addr = MLX5_ADDR_OF(set_roce_address_in, in, roce_address);
 -      enum rdma_link_layer ll = mlx5_ib_port_link_layer(device, port_num);
 -
 -      if (ll != IB_LINK_LAYER_ETHERNET)
 -              return -EINVAL;
 -
 -      ib_gid_to_mlx5_roce_addr(gid, attr, in_addr);
 -
 -      MLX5_SET(set_roce_address_in, in, roce_address_index, index);
 -      MLX5_SET(set_roce_address_in, in, opcode, MLX5_CMD_OP_SET_ROCE_ADDRESS);
 -      return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
 +      return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
 +                                    roce_l3_type, gid->raw, mac, vlan,
 +                                    vlan_id);
  }
  
  static int mlx5_ib_add_gid(struct ib_device *device, u8 port_num,
                           const struct ib_gid_attr *attr,
                           __always_unused void **context)
  {
 -      return set_roce_addr(device, port_num, index, gid, attr);
 +      return set_roce_addr(to_mdev(device), port_num, index, gid, attr);
  }
  
  static int mlx5_ib_del_gid(struct ib_device *device, u8 port_num,
                           unsigned int index, __always_unused void **context)
  {
 -      return set_roce_addr(device, port_num, index, NULL, NULL);
 +      return set_roce_addr(to_mdev(device), port_num, index, NULL, NULL);
  }
  
  __be16 mlx5_get_roce_udp_sport(struct mlx5_ib_dev *dev, u8 port_num,
@@@ -415,7 -440,7 +415,7 @@@ static void get_atomic_caps(struct mlx5
        u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
        u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);
        u8 atomic_req_8B_endianness_mode =
 -              MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianess_mode);
 +              MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);
  
        /* Check if HW supports 8 bytes standard atomic operations and capable
         * of host endianness respond
@@@ -954,31 -979,20 +954,31 @@@ out
  int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
                       struct ib_port_attr *props)
  {
 +      unsigned int count;
 +      int ret;
 +
        switch (mlx5_get_vport_access_method(ibdev)) {
        case MLX5_VPORT_ACCESS_METHOD_MAD:
 -              return mlx5_query_mad_ifc_port(ibdev, port, props);
 +              ret = mlx5_query_mad_ifc_port(ibdev, port, props);
 +              break;
  
        case MLX5_VPORT_ACCESS_METHOD_HCA:
 -              return mlx5_query_hca_port(ibdev, port, props);
 +              ret = mlx5_query_hca_port(ibdev, port, props);
 +              break;
  
        case MLX5_VPORT_ACCESS_METHOD_NIC:
 -              mlx5_query_port_roce(ibdev, port, props);
 -              return 0;
 +              ret = mlx5_query_port_roce(ibdev, port, props);
 +              break;
  
        default:
 -              return -EINVAL;
 +              ret = -EINVAL;
 +      }
 +
 +      if (!ret && props) {
 +              count = mlx5_core_reserved_gids_count(to_mdev(ibdev)->mdev);
 +              props->gid_tbl_len -= count;
        }
 +      return ret;
  }
  
  static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
@@@ -2249,7 -2263,7 +2249,7 @@@ static struct mlx5_ib_flow_handler *cre
        if (!is_valid_attr(dev->mdev, flow_attr))
                return ERR_PTR(-EINVAL);
  
 -      spec = mlx5_vzalloc(sizeof(*spec));
 +      spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        handler = kzalloc(sizeof(*handler), GFP_KERNEL);
        if (!handler || !spec) {
                err = -ENOMEM;
@@@ -3454,7 -3468,7 +3454,7 @@@ static int mlx5_ib_query_q_counters(str
        __be32 val;
        int ret, i;
  
 -      out = mlx5_vzalloc(outlen);
 +      out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;
  
@@@ -3483,7 -3497,7 +3483,7 @@@ static int mlx5_ib_query_cong_counters(
        int ret, i;
        int offset = port->cnts.num_q_counters;
  
 -      out = mlx5_vzalloc(outlen);
 +      out = kvzalloc(outlen, GFP_KERNEL);
        if (!out)
                return -ENOMEM;
  
@@@ -3528,6 -3542,11 +3528,11 @@@ static int mlx5_ib_get_hw_stats(struct 
        return num_counters;
  }
  
+ static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
+ {
+       return mlx5_rdma_netdev_free(netdev);
+ }
  static struct net_device*
mlx5_ib_alloc_rdma_netdev(struct ib_device *hca,
                          u8 port_num,
                          enum rdma_netdev_t type,
                          const char *name,
                          unsigned char name_assign_type,
                          void (*setup)(struct net_device *))
  {
+       struct net_device *netdev;
+       struct rdma_netdev *rn;
        if (type != RDMA_NETDEV_IPOIB)
                return ERR_PTR(-EOPNOTSUPP);
  
-       return mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
-                                     name, setup);
- }
- static void mlx5_ib_free_rdma_netdev(struct net_device *netdev)
- {
-       return mlx5_rdma_netdev_free(netdev);
+       netdev = mlx5_rdma_netdev_alloc(to_mdev(hca)->mdev, hca,
+                                       name, setup);
+       if (likely(!IS_ERR_OR_NULL(netdev))) {
+               rn = netdev_priv(netdev);
+               rn->free_rdma_netdev = mlx5_ib_free_rdma_netdev;
+       }
+       return netdev;
  }
  
  static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
        dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
        dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
-       if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
+       if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
                dev->ib_dev.alloc_rdma_netdev   = mlx5_ib_alloc_rdma_netdev;
-               dev->ib_dev.free_rdma_netdev    = mlx5_ib_free_rdma_netdev;
-       }
        if (mlx5_core_is_pf(mdev)) {
                dev->ib_dev.get_vf_config       = mlx5_ib_get_vf_config;
                dev->ib_dev.set_vf_link_state   = mlx5_ib_set_vf_link_state;
index d129625af0a7125cbfd827b0577b05de1a2e96bd,9ec0dbea3b6b2716db74b2b470e571b46ce7d586..6e86eeee370e86602977bbf3f3949bf2d5ac26ef
@@@ -681,7 -681,7 +681,7 @@@ static void push_pseudo_header(struct s
  {
        struct ipoib_pseudo_header *phdr;
  
 -      phdr = (struct ipoib_pseudo_header *)skb_push(skb, sizeof(*phdr));
 +      phdr = skb_push(skb, sizeof(*phdr));
        memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
  }
  
@@@ -1129,7 -1129,7 +1129,7 @@@ static int ipoib_hard_header(struct sk_
  {
        struct ipoib_header *header;
  
 -      header = (struct ipoib_header *) skb_push(skb, sizeof *header);
 +      header = skb_push(skb, sizeof *header);
  
        header->proto = htons(type);
        header->reserved = 0;
@@@ -1893,6 -1893,7 +1893,7 @@@ static struct net_devic
        rn->send = ipoib_send;
        rn->attach_mcast = ipoib_mcast_attach;
        rn->detach_mcast = ipoib_mcast_detach;
+       rn->free_rdma_netdev = free_netdev;
        rn->hca = hca;
  
        dev->netdev_ops = &ipoib_netdev_default_pf;
@@@ -2288,6 -2289,8 +2289,8 @@@ static void ipoib_remove_one(struct ib_
                return;
  
        list_for_each_entry_safe(priv, tmp, dev_list, list) {
+               struct rdma_netdev *rn = netdev_priv(priv->dev);
                ib_unregister_event_handler(&priv->event_handler);
                flush_workqueue(ipoib_workqueue);
  
                flush_workqueue(priv->wq);
  
                unregister_netdev(priv->dev);
-               if (device->free_rdma_netdev)
-                       device->free_rdma_netdev(priv->dev);
-               else
-                       free_netdev(priv->dev);
+               rn->free_rdma_netdev(priv->dev);
  
                list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
                        kfree(cpriv);
index fcf75323d62ae77354f89d65a0f4d3a6c69987b4,1a89c603335832905da722278bcab72e1d14e56d..1a3c25364b645e971d307763454ce17ffa1da2d4
@@@ -69,9 -69,9 +69,9 @@@ static void opa_vnic_get_stats64(struc
        struct opa_vnic_stats vstats;
  
        memset(&vstats, 0, sizeof(vstats));
-       mutex_lock(&adapter->stats_lock);
+       spin_lock(&adapter->stats_lock);
        adapter->rn_ops->ndo_get_stats64(netdev, &vstats.netstats);
-       mutex_unlock(&adapter->stats_lock);
+       spin_unlock(&adapter->stats_lock);
        memcpy(stats, &vstats.netstats, sizeof(*stats));
  }
  
@@@ -103,7 -103,7 +103,7 @@@ static u16 opa_vnic_select_queue(struc
        int rc;
  
        /* pass entropy and vl as metadata in skb */
 -      mdata = (struct opa_vnic_skb_mdata *)skb_push(skb, sizeof(*mdata));
 +      mdata = skb_push(skb, sizeof(*mdata));
        mdata->entropy =  opa_vnic_calc_entropy(adapter, skb);
        mdata->vl = opa_vnic_get_vl(adapter, skb);
        rc = adapter->rn_ops->ndo_select_queue(netdev, skb,
@@@ -323,13 -323,13 +323,13 @@@ struct opa_vnic_adapter *opa_vnic_add_n
        else if (IS_ERR(netdev))
                return ERR_CAST(netdev);
  
+       rn = netdev_priv(netdev);
        adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
        if (!adapter) {
                rc = -ENOMEM;
                goto adapter_err;
        }
  
-       rn = netdev_priv(netdev);
        rn->clnt_priv = adapter;
        rn->hca = ibdev;
        rn->port_num = port_num;
        netdev->hard_header_len += OPA_VNIC_SKB_HEADROOM;
        mutex_init(&adapter->lock);
        mutex_init(&adapter->mactbl_lock);
-       mutex_init(&adapter->stats_lock);
+       spin_lock_init(&adapter->stats_lock);
  
        SET_NETDEV_DEV(netdev, ibdev->dev.parent);
  
  netdev_err:
        mutex_destroy(&adapter->lock);
        mutex_destroy(&adapter->mactbl_lock);
-       mutex_destroy(&adapter->stats_lock);
        kfree(adapter);
  adapter_err:
-       ibdev->free_rdma_netdev(netdev);
+       rn->free_rdma_netdev(netdev);
  
        return ERR_PTR(rc);
  }
  void opa_vnic_rem_netdev(struct opa_vnic_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
-       struct ib_device *ibdev = adapter->ibdev;
+       struct rdma_netdev *rn = netdev_priv(netdev);
  
        v_info("removing\n");
        unregister_netdev(netdev);
        opa_vnic_release_mac_tbl(adapter);
        mutex_destroy(&adapter->lock);
        mutex_destroy(&adapter->mactbl_lock);
-       mutex_destroy(&adapter->stats_lock);
        kfree(adapter);
-       ibdev->free_rdma_netdev(netdev);
+       rn->free_rdma_netdev(netdev);
  }
diff --combined include/rdma/ib_verbs.h
index 0e480a5630d440240628619d5fcd5d3267554ea4,71313d5ca1c84acffb3c0ae2494847c615fb977a..356953d3dbd18c351313b3f5e36e92ae24b58cda
@@@ -1614,45 -1614,6 +1614,45 @@@ struct ib_rwq_ind_table_init_attr 
        struct ib_wq    **ind_tbl;
  };
  
 +enum port_pkey_state {
 +      IB_PORT_PKEY_NOT_VALID = 0,
 +      IB_PORT_PKEY_VALID = 1,
 +      IB_PORT_PKEY_LISTED = 2,
 +};
 +
 +struct ib_qp_security;
 +
 +struct ib_port_pkey {
 +      enum port_pkey_state    state;
 +      u16                     pkey_index;
 +      u8                      port_num;
 +      struct list_head        qp_list;
 +      struct list_head        to_error_list;
 +      struct ib_qp_security  *sec;
 +};
 +
 +struct ib_ports_pkeys {
 +      struct ib_port_pkey     main;
 +      struct ib_port_pkey     alt;
 +};
 +
 +struct ib_qp_security {
 +      struct ib_qp           *qp;
 +      struct ib_device       *dev;
 +      /* Hold this mutex when changing port and pkey settings. */
 +      struct mutex            mutex;
 +      struct ib_ports_pkeys  *ports_pkeys;
 +      /* A list of all open shared QP handles.  Required to enforce security
 +       * properly for all users of a shared QP.
 +       */
 +      struct list_head        shared_qp_list;
 +      void                   *security;
 +      bool                    destroying;
 +      atomic_t                error_list_count;
 +      struct completion       error_complete;
 +      int                     error_comps_pending;
 +};
 +
  /*
   * @max_write_sge: Maximum SGE elements per RDMA WRITE request.
   * @max_read_sge:  Maximum SGE elements per RDMA READ request.
@@@ -1682,7 -1643,6 +1682,7 @@@ struct ib_qp 
        u32                     max_read_sge;
        enum ib_qp_type         qp_type;
        struct ib_rwq_ind_table *rwq_ind_tbl;
 +      struct ib_qp_security  *qp_sec;
  };
  
  struct ib_mr {
@@@ -1931,7 -1891,6 +1931,7 @@@ enum ib_mad_result 
  };
  
  struct ib_port_cache {
 +      u64                   subnet_prefix;
        struct ib_pkey_cache  *pkey;
        struct ib_gid_table   *gid;
        u8                     lmc;
@@@ -1968,6 -1927,9 +1968,9 @@@ struct rdma_netdev 
        struct ib_device  *hca;
        u8                 port_num;
  
+       /* cleanup function must be specified */
+       void (*free_rdma_netdev)(struct net_device *netdev);
        /* control functions */
        void (*set_id)(struct net_device *netdev, int id);
        /* send packet */
                            union ib_gid *gid, u16 mlid);
  };
  
 +struct ib_port_pkey_list {
 +      /* Lock to hold while modifying the list. */
 +      spinlock_t                    list_lock;
 +      struct list_head              pkey_list;
 +};
 +
  struct ib_device {
        /* Do not access @dma_device directly from ULP nor from HW drivers. */
        struct device                *dma_device;
  
        int                           num_comp_vectors;
  
 +      struct ib_port_pkey_list     *port_pkey_list;
 +
        struct iw_cm_verbs           *iwcm;
  
        /**
                                                           struct ib_udata *udata);
        int                        (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
        /**
-        * rdma netdev operations
+        * rdma netdev operation
         *
         * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
         * doesn't support the specified rdma netdev type.
                                        const char *name,
                                        unsigned char name_assign_type,
                                        void (*setup)(struct net_device *));
-       void (*free_rdma_netdev)(struct net_device *netdev);
  
        struct module               *owner;
        struct device                dev;