Git Repo - linux.git/commitdiff
Merge tag 'v6.4' into rdma.git for-next
author     Jason Gunthorpe <[email protected]>
           Tue, 27 Jun 2023 17:06:29 +0000 (14:06 -0300)
committer  Jason Gunthorpe <[email protected]>
           Tue, 27 Jun 2023 17:06:29 +0000 (14:06 -0300)
Linux 6.4

Resolve conflicts between rdma rc and next in rxe_cq matching linux-next:

drivers/infiniband/sw/rxe/rxe_cq.c:
  https://lore.kernel.org/r/20230622115246.365d30ad@canb.auug.org.au

Signed-off-by: Jason Gunthorpe <[email protected]>
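
A merge like this is typically produced with a workflow along the following
lines; the commands are an illustrative sketch rather than part of the commit
(the branch name "for-next" and the conflicting path come from the text above,
everything else is assumed):

    git checkout for-next
    git merge v6.4          # pull Linux 6.4 into rdma.git for-next
    # the merge stops on the conflict in drivers/infiniband/sw/rxe/rxe_cq.c;
    # edit the file so the resolution matches linux-next, then:
    git add drivers/infiniband/sw/rxe/rxe_cq.c
    git commit -s           # record the merge with a Signed-off-by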
20 files changed:
drivers/infiniband/core/cma.c
drivers/infiniband/hw/bnxt_re/bnxt_re.h
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/main.c
drivers/infiniband/hw/bnxt_re/qplib_fp.c
drivers/infiniband/hw/bnxt_re/qplib_res.c
drivers/infiniband/hw/bnxt_re/qplib_sp.c
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/irdma/verbs.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/sw/rxe/rxe_comp.c
drivers/infiniband/sw/rxe/rxe_cq.c
drivers/infiniband/sw/rxe/rxe_qp.c
drivers/infiniband/sw/rxe/rxe_req.c
drivers/infiniband/sw/rxe/rxe_resp.c
drivers/infiniband/sw/rxe/rxe_verbs.c
drivers/infiniband/ulp/rtrs/rtrs-clt.c
drivers/net/ethernet/microsoft/mana/mana_en.c
include/linux/mlx5/driver.h
include/net/mana/mana.h

diff --combined drivers/infiniband/core/cma.c
index 5146ef2dbfd9e97af0114e34d0e9999814909d50,6b3f4384e46acbfcbc0f6cee7ed8e31d048c924f..1ee87c3aaeabc9ceec26c8ef8f6b06762233d1c0
@@@ -3295,7 -3295,7 +3295,7 @@@ static int cma_resolve_iboe_route(struc
        route->path_rec->traffic_class = tos;
        route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
        route->path_rec->rate_selector = IB_SA_EQ;
-       route->path_rec->rate = iboe_get_rate(ndev);
+       route->path_rec->rate = IB_RATE_PORT_CURRENT;
        dev_put(ndev);
        route->path_rec->packet_life_time_selector = IB_SA_EQ;
        /* In case ACK timeout is set, use this value to calculate
@@@ -4805,7 -4805,8 +4805,7 @@@ static void cma_make_mc_event(int statu
        event->param.ud.qkey = id_priv->qkey;
  
  out:
 -      if (ndev)
 -              dev_put(ndev);
 +      dev_put(ndev);
  }
  
  static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
@@@ -4963,7 -4964,7 +4963,7 @@@ static int cma_iboe_join_multicast(stru
        if (!ndev)
                return -ENODEV;
  
-       ib.rec.rate = iboe_get_rate(ndev);
+       ib.rec.rate = IB_RATE_PORT_CURRENT;
        ib.rec.hop_limit = 1;
        ib.rec.mtu = iboe_get_mtu(ndev->mtu);
  
diff --combined drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 9e278d23eb82b3aa1e7902a04bbf4e6453f956ad,2c95e6f3d47acc6c43aceb627907f66579c142c8..ea81b2497511a4ced31f7e65846d3bb877710855
@@@ -39,7 -39,6 +39,7 @@@
  
  #ifndef __BNXT_RE_H__
  #define __BNXT_RE_H__
 +#include <rdma/uverbs_ioctl.h>
  #include "hw_counters.h"
  #define ROCE_DRV_MODULE_NAME          "bnxt_re"
  
@@@ -136,8 -135,6 +136,6 @@@ struct bnxt_re_dev 
  
        struct delayed_work             worker;
        u8                              cur_prio_map;
-       u16                             active_speed;
-       u8                              active_width;
  
        /* FP Notification Queue (CQ & SRQ) */
        struct tasklet_struct           nq_task;
  #define BNXT_RE_ROCEV2_IPV4_PACKET    2
  #define BNXT_RE_ROCEV2_IPV6_PACKET    3
  
 +#define BNXT_RE_CHECK_RC(x) ((x) && ((x) != -ETIMEDOUT))
 +
  static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
  {
        if (rdev)
                return  &rdev->ibdev.dev;
        return NULL;
  }
 +
 +extern const struct uapi_definition bnxt_re_uapi_defs[];
  #endif
diff --combined drivers/infiniband/hw/bnxt_re/ib_verbs.c
index ef47c32a53cb514b387e33da33df53eab89072a3,952811c40c54be64d52532f455399a1a1417d96c..abef0b8baa7c31b43b0e8f93bf4d7f48e5466a3d
  
  #include "bnxt_re.h"
  #include "ib_verbs.h"
 +
 +#include <rdma/uverbs_types.h>
 +#include <rdma/uverbs_std_types.h>
 +
 +#include <rdma/ib_user_ioctl_cmds.h>
 +
 +#define UVERBS_MODULE_NAME bnxt_re
 +#include <rdma/uverbs_named_ioctl.h>
 +
  #include <rdma/bnxt_re-abi.h>
  
  static int __from_ib_access_flags(int iflags)
@@@ -208,6 -199,7 +208,7 @@@ int bnxt_re_query_port(struct ib_devic
  {
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
+       int rc;
  
        memset(port_attr, 0, sizeof(*port_attr));
  
        port_attr->sm_sl = 0;
        port_attr->subnet_timeout = 0;
        port_attr->init_type_reply = 0;
-       port_attr->active_speed = rdev->active_speed;
-       port_attr->active_width = rdev->active_width;
+       rc = ib_get_eth_speed(&rdev->ibdev, port_num, &port_attr->active_speed,
+                             &port_attr->active_width);
  
-       return 0;
+       return rc;
  }
  
  int bnxt_re_get_port_immutable(struct ib_device *ibdev, u32 port_num,
@@@ -542,57 -534,12 +543,57 @@@ fail
        return rc;
  }
  
 +static struct bnxt_re_user_mmap_entry*
 +bnxt_re_mmap_entry_insert(struct bnxt_re_ucontext *uctx, u64 mem_offset,
 +                        enum bnxt_re_mmap_flag mmap_flag, u64 *offset)
 +{
 +      struct bnxt_re_user_mmap_entry *entry;
 +      int ret;
 +
 +      entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 +      if (!entry)
 +              return NULL;
 +
 +      entry->mem_offset = mem_offset;
 +      entry->mmap_flag = mmap_flag;
 +      entry->uctx = uctx;
 +
 +      switch (mmap_flag) {
 +      case BNXT_RE_MMAP_SH_PAGE:
 +              ret = rdma_user_mmap_entry_insert_exact(&uctx->ib_uctx,
 +                                                      &entry->rdma_entry, PAGE_SIZE, 0);
 +              break;
 +      case BNXT_RE_MMAP_UC_DB:
 +      case BNXT_RE_MMAP_WC_DB:
 +              ret = rdma_user_mmap_entry_insert(&uctx->ib_uctx,
 +                                                &entry->rdma_entry, PAGE_SIZE);
 +              break;
 +      default:
 +              ret = -EINVAL;
 +              break;
 +      }
 +
 +      if (ret) {
 +              kfree(entry);
 +              return NULL;
 +      }
 +      if (offset)
 +              *offset = rdma_user_mmap_get_offset(&entry->rdma_entry);
 +
 +      return entry;
 +}
 +
  /* Protection Domains */
  int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
  {
        struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
        struct bnxt_re_dev *rdev = pd->rdev;
  
 +      if (udata) {
 +              rdma_user_mmap_entry_remove(pd->pd_db_mmap);
 +              pd->pd_db_mmap = NULL;
 +      }
 +
        bnxt_re_destroy_fence_mr(pd);
  
        if (pd->qplib_pd.id) {
@@@ -611,8 -558,7 +612,8 @@@ int bnxt_re_alloc_pd(struct ib_pd *ibpd
        struct bnxt_re_ucontext *ucntx = rdma_udata_to_drv_context(
                udata, struct bnxt_re_ucontext, ib_uctx);
        struct bnxt_re_pd *pd = container_of(ibpd, struct bnxt_re_pd, ib_pd);
 -      int rc;
 +      struct bnxt_re_user_mmap_entry *entry = NULL;
 +      int rc = 0;
  
        pd->rdev = rdev;
        if (bnxt_qplib_alloc_pd(&rdev->qplib_res.pd_tbl, &pd->qplib_pd)) {
        }
  
        if (udata) {
 -              struct bnxt_re_pd_resp resp;
 +              struct bnxt_re_pd_resp resp = {};
  
                if (!ucntx->dpi.dbr) {
                        /* Allocate DPI in alloc_pd to avoid failing of
                         * ibv_devinfo and family of application when DPIs
                         * are depleted.
                         */
 -                      if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
 -                                               &ucntx->dpi, ucntx)) {
 +                      if (bnxt_qplib_alloc_dpi(&rdev->qplib_res,
 +                                               &ucntx->dpi, ucntx, BNXT_QPLIB_DPI_TYPE_UC)) {
                                rc = -ENOMEM;
                                goto dbfail;
                        }
                resp.pdid = pd->qplib_pd.id;
                /* Still allow mapping this DBR to the new user PD. */
                resp.dpi = ucntx->dpi.dpi;
 -              resp.dbr = (u64)ucntx->dpi.umdbr;
  
 -              rc = ib_copy_to_udata(udata, &resp, sizeof(resp));
 +              entry = bnxt_re_mmap_entry_insert(ucntx, (u64)ucntx->dpi.umdbr,
 +                                                BNXT_RE_MMAP_UC_DB, &resp.dbr);
 +
 +              if (!entry) {
 +                      rc = -ENOMEM;
 +                      goto dbfail;
 +              }
 +
 +              pd->pd_db_mmap = &entry->rdma_entry;
 +
 +              rc = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen));
                if (rc) {
 -                      ibdev_err(&rdev->ibdev,
 -                                "Failed to copy user response\n");
 +                      rdma_user_mmap_entry_remove(pd->pd_db_mmap);
 +                      rc = -EFAULT;
                        goto dbfail;
                }
        }
@@@ -677,20 -614,12 +678,20 @@@ int bnxt_re_destroy_ah(struct ib_ah *ib
  {
        struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
        struct bnxt_re_dev *rdev = ah->rdev;
 +      bool block = true;
 +      int rc = 0;
  
 -      bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
 -                            !(flags & RDMA_DESTROY_AH_SLEEPABLE));
 +      block = !(flags & RDMA_DESTROY_AH_SLEEPABLE);
 +      rc = bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, block);
 +      if (BNXT_RE_CHECK_RC(rc)) {
 +              if (rc == -ETIMEDOUT)
 +                      rc = 0;
 +              else
 +                      goto fail;
 +      }
        atomic_dec(&rdev->ah_count);
 -
 -      return 0;
 +fail:
 +      return rc;
  }
  
  static u8 bnxt_re_stack_to_dev_nw_type(enum rdma_network_type ntype)
@@@ -3413,9 -3342,7 +3414,7 @@@ static int bnxt_re_process_raw_qp_pkt_r
        udwr.remote_qkey = gsi_sqp->qplib_qp.qkey;
  
        /* post data received  in the send queue */
-       rc = bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
-       return 0;
+       return bnxt_re_post_send_shadow_qp(rdev, gsi_sqp, swr);
  }
  
  static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
@@@ -4028,7 -3955,6 +4027,7 @@@ int bnxt_re_alloc_ucontext(struct ib_uc
                container_of(ctx, struct bnxt_re_ucontext, ib_uctx);
        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
        struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
 +      struct bnxt_re_user_mmap_entry *entry;
        struct bnxt_re_uctx_resp resp = {};
        u32 chip_met_rev_num = 0;
        int rc;
        resp.comp_mask |= BNXT_RE_UCNTX_CMASK_HAVE_MODE;
        resp.mode = rdev->chip_ctx->modes.wqe_mode;
  
 +      if (rdev->chip_ctx->modes.db_push)
 +              resp.comp_mask |= BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED;
 +
 +      entry = bnxt_re_mmap_entry_insert(uctx, 0, BNXT_RE_MMAP_SH_PAGE, NULL);
 +      if (!entry) {
 +              rc = -ENOMEM;
 +              goto cfail;
 +      }
 +      uctx->shpage_mmap = &entry->rdma_entry;
 +
        rc = ib_copy_to_udata(udata, &resp, min(udata->outlen, sizeof(resp)));
        if (rc) {
                ibdev_err(ibdev, "Failed to copy user context");
@@@ -4100,8 -4016,6 +4099,8 @@@ void bnxt_re_dealloc_ucontext(struct ib
  
        struct bnxt_re_dev *rdev = uctx->rdev;
  
 +      rdma_user_mmap_entry_remove(uctx->shpage_mmap);
 +      uctx->shpage_mmap = NULL;
        if (uctx->shpg)
                free_page((unsigned long)uctx->shpg);
  
                /* Free DPI only if this is the first PD allocated by the
                 * application and mark the context dpi as NULL
                 */
 -              bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
 -                                     &rdev->qplib_res.dpi_tbl, &uctx->dpi);
 +              bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->dpi);
                uctx->dpi.dbr = NULL;
        }
  }
@@@ -4120,177 -4035,27 +4119,177 @@@ int bnxt_re_mmap(struct ib_ucontext *ib
        struct bnxt_re_ucontext *uctx = container_of(ib_uctx,
                                                   struct bnxt_re_ucontext,
                                                   ib_uctx);
 -      struct bnxt_re_dev *rdev = uctx->rdev;
 +      struct bnxt_re_user_mmap_entry *bnxt_entry;
 +      struct rdma_user_mmap_entry *rdma_entry;
 +      int ret = 0;
        u64 pfn;
  
 -      if (vma->vm_end - vma->vm_start != PAGE_SIZE)
 +      rdma_entry = rdma_user_mmap_entry_get(&uctx->ib_uctx, vma);
 +      if (!rdma_entry)
                return -EINVAL;
  
 -      if (vma->vm_pgoff) {
 -              vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 -              if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 -                                     PAGE_SIZE, vma->vm_page_prot)) {
 -                      ibdev_err(&rdev->ibdev, "Failed to map DPI");
 -                      return -EAGAIN;
 -              }
 -      } else {
 -              pfn = virt_to_phys(uctx->shpg) >> PAGE_SHIFT;
 -              if (remap_pfn_range(vma, vma->vm_start,
 -                                  pfn, PAGE_SIZE, vma->vm_page_prot)) {
 -                      ibdev_err(&rdev->ibdev, "Failed to map shared page");
 -                      return -EAGAIN;
 +      bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
 +                                rdma_entry);
 +
 +      switch (bnxt_entry->mmap_flag) {
 +      case BNXT_RE_MMAP_WC_DB:
 +              pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
 +              ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
 +                                      pgprot_writecombine(vma->vm_page_prot),
 +                                      rdma_entry);
 +              break;
 +      case BNXT_RE_MMAP_UC_DB:
 +              pfn = bnxt_entry->mem_offset >> PAGE_SHIFT;
 +              ret = rdma_user_mmap_io(ib_uctx, vma, pfn, PAGE_SIZE,
 +                                      pgprot_noncached(vma->vm_page_prot),
 +                              rdma_entry);
 +              break;
 +      case BNXT_RE_MMAP_SH_PAGE:
 +              ret = vm_insert_page(vma, vma->vm_start, virt_to_page(uctx->shpg));
 +              break;
 +      default:
 +              ret = -EINVAL;
 +              break;
 +      }
 +
 +      rdma_user_mmap_entry_put(rdma_entry);
 +      return ret;
 +}
 +
 +void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
 +{
 +      struct bnxt_re_user_mmap_entry *bnxt_entry;
 +
 +      bnxt_entry = container_of(rdma_entry, struct bnxt_re_user_mmap_entry,
 +                                rdma_entry);
 +
 +      kfree(bnxt_entry);
 +}
 +
 +static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *attrs)
 +{
 +      struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
 +      enum bnxt_re_alloc_page_type alloc_type;
 +      struct bnxt_re_user_mmap_entry *entry;
 +      enum bnxt_re_mmap_flag mmap_flag;
 +      struct bnxt_qplib_chip_ctx *cctx;
 +      struct bnxt_re_ucontext *uctx;
 +      struct bnxt_re_dev *rdev;
 +      u64 mmap_offset;
 +      u32 length;
 +      u32 dpi;
 +      u64 dbr;
 +      int err;
 +
 +      uctx = container_of(ib_uverbs_get_ucontext(attrs), struct bnxt_re_ucontext, ib_uctx);
 +      if (IS_ERR(uctx))
 +              return PTR_ERR(uctx);
 +
 +      err = uverbs_get_const(&alloc_type, attrs, BNXT_RE_ALLOC_PAGE_TYPE);
 +      if (err)
 +              return err;
 +
 +      rdev = uctx->rdev;
 +      cctx = rdev->chip_ctx;
 +
 +      switch (alloc_type) {
 +      case BNXT_RE_ALLOC_WC_PAGE:
 +              if (cctx->modes.db_push)  {
 +                      if (bnxt_qplib_alloc_dpi(&rdev->qplib_res, &uctx->wcdpi,
 +                                               uctx, BNXT_QPLIB_DPI_TYPE_WC))
 +                              return -ENOMEM;
 +                      length = PAGE_SIZE;
 +                      dpi = uctx->wcdpi.dpi;
 +                      dbr = (u64)uctx->wcdpi.umdbr;
 +                      mmap_flag = BNXT_RE_MMAP_WC_DB;
 +              } else {
 +                      return -EINVAL;
                }
 +
 +              break;
 +
 +      default:
 +              return -EOPNOTSUPP;
        }
  
 +      entry = bnxt_re_mmap_entry_insert(uctx, dbr, mmap_flag, &mmap_offset);
 +      if (!entry)
 +              return -ENOMEM;
 +
 +      uobj->object = entry;
 +      uverbs_finalize_uobj_create(attrs, BNXT_RE_ALLOC_PAGE_HANDLE);
 +      err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
 +                           &mmap_offset, sizeof(mmap_offset));
 +      if (err)
 +              return err;
 +
 +      err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
 +                           &length, sizeof(length));
 +      if (err)
 +              return err;
 +
 +      err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
 +                           &dpi, sizeof(length));
 +      if (err)
 +              return err;
 +
        return 0;
  }
 +
 +static int alloc_page_obj_cleanup(struct ib_uobject *uobject,
 +                                enum rdma_remove_reason why,
 +                          struct uverbs_attr_bundle *attrs)
 +{
 +      struct  bnxt_re_user_mmap_entry *entry = uobject->object;
 +      struct bnxt_re_ucontext *uctx = entry->uctx;
 +
 +      switch (entry->mmap_flag) {
 +      case BNXT_RE_MMAP_WC_DB:
 +              if (uctx && uctx->wcdpi.dbr) {
 +                      struct bnxt_re_dev *rdev = uctx->rdev;
 +
 +                      bnxt_qplib_dealloc_dpi(&rdev->qplib_res, &uctx->wcdpi);
 +                      uctx->wcdpi.dbr = NULL;
 +              }
 +              break;
 +      default:
 +              goto exit;
 +      }
 +      rdma_user_mmap_entry_remove(&entry->rdma_entry);
 +exit:
 +      return 0;
 +}
 +
 +DECLARE_UVERBS_NAMED_METHOD(BNXT_RE_METHOD_ALLOC_PAGE,
 +                          UVERBS_ATTR_IDR(BNXT_RE_ALLOC_PAGE_HANDLE,
 +                                          BNXT_RE_OBJECT_ALLOC_PAGE,
 +                                          UVERBS_ACCESS_NEW,
 +                                          UA_MANDATORY),
 +                          UVERBS_ATTR_CONST_IN(BNXT_RE_ALLOC_PAGE_TYPE,
 +                                               enum bnxt_re_alloc_page_type,
 +                                               UA_MANDATORY),
 +                          UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_OFFSET,
 +                                              UVERBS_ATTR_TYPE(u64),
 +                                              UA_MANDATORY),
 +                          UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_MMAP_LENGTH,
 +                                              UVERBS_ATTR_TYPE(u32),
 +                                              UA_MANDATORY),
 +                          UVERBS_ATTR_PTR_OUT(BNXT_RE_ALLOC_PAGE_DPI,
 +                                              UVERBS_ATTR_TYPE(u32),
 +                                              UA_MANDATORY));
 +
 +DECLARE_UVERBS_NAMED_METHOD_DESTROY(BNXT_RE_METHOD_DESTROY_PAGE,
 +                                  UVERBS_ATTR_IDR(BNXT_RE_DESTROY_PAGE_HANDLE,
 +                                                  BNXT_RE_OBJECT_ALLOC_PAGE,
 +                                                  UVERBS_ACCESS_DESTROY,
 +                                                  UA_MANDATORY));
 +
 +DECLARE_UVERBS_NAMED_OBJECT(BNXT_RE_OBJECT_ALLOC_PAGE,
 +                          UVERBS_TYPE_ALLOC_IDR(alloc_page_obj_cleanup),
 +                          &UVERBS_METHOD(BNXT_RE_METHOD_ALLOC_PAGE),
 +                          &UVERBS_METHOD(BNXT_RE_METHOD_DESTROY_PAGE));
 +
 +const struct uapi_definition bnxt_re_uapi_defs[] = {
 +      UAPI_DEF_CHAIN_OBJ_TREE_NAMED(BNXT_RE_OBJECT_ALLOC_PAGE),
 +      {}
 +};
diff --combined drivers/infiniband/hw/bnxt_re/main.c
index 729a2f5318ccfdd2c349760ad912145bb6201b95,3073398a218341e67f5742f00247461276bc4bdf..b42166fe7454045c87a442d62695129ec7e83b7a
@@@ -83,45 -83,6 +83,45 @@@ static int bnxt_re_netdev_event(struct 
                                unsigned long event, void *ptr);
  static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev);
  static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev);
 +static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
 +
 +static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
 +                           u32 *offset);
 +static void bnxt_re_set_db_offset(struct bnxt_re_dev *rdev)
 +{
 +      struct bnxt_qplib_chip_ctx *cctx;
 +      struct bnxt_en_dev *en_dev;
 +      struct bnxt_qplib_res *res;
 +      u32 l2db_len = 0;
 +      u32 offset = 0;
 +      u32 barlen;
 +      int rc;
 +
 +      res = &rdev->qplib_res;
 +      en_dev = rdev->en_dev;
 +      cctx = rdev->chip_ctx;
 +
 +      /* Issue qcfg */
 +      rc = bnxt_re_hwrm_qcfg(rdev, &l2db_len, &offset);
 +      if (rc)
 +              dev_info(rdev_to_dev(rdev),
 +                       "Couldn't get DB bar size, Low latency framework is disabled\n");
 +      /* set register offsets for both UC and WC */
 +      res->dpi_tbl.ucreg.offset = res->is_vf ? BNXT_QPLIB_DBR_VF_DB_OFFSET :
 +                                               BNXT_QPLIB_DBR_PF_DB_OFFSET;
 +      res->dpi_tbl.wcreg.offset = res->dpi_tbl.ucreg.offset;
 +
 +      /* If WC mapping is disabled by L2 driver then en_dev->l2_db_size
 +       * is equal to the DB-Bar actual size. This indicates that L2
 +       * is mapping entire bar as UC-. RoCE driver can't enable WC mapping
 +       * in such cases and DB-push will be disabled.
 +       */
 +      barlen = pci_resource_len(res->pdev, RCFW_DBR_PCI_BAR_REGION);
 +      if (cctx->modes.db_push && l2db_len && en_dev->l2_db_size != barlen) {
 +              res->dpi_tbl.wcreg.offset = en_dev->l2_db_size;
 +              dev_info(rdev_to_dev(rdev),  "Low latency framework is enabled\n");
 +      }
 +}
  
  static void bnxt_re_set_drv_mode(struct bnxt_re_dev *rdev, u8 mode)
  {
        cctx = rdev->chip_ctx;
        cctx->modes.wqe_mode = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
                               mode : BNXT_QPLIB_WQE_MODE_STATIC;
 +      if (bnxt_re_hwrm_qcaps(rdev))
 +              dev_err(rdev_to_dev(rdev),
 +                      "Failed to query hwrm qcaps\n");
  }
  
  static void bnxt_re_destroy_chip_ctx(struct bnxt_re_dev *rdev)
@@@ -154,7 -112,6 +154,7 @@@ static int bnxt_re_setup_chip_ctx(struc
  {
        struct bnxt_qplib_chip_ctx *chip_ctx;
        struct bnxt_en_dev *en_dev;
 +      int rc;
  
        en_dev = rdev->en_dev;
  
        rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
  
        bnxt_re_set_drv_mode(rdev, wqe_mode);
 +
 +      bnxt_re_set_db_offset(rdev);
 +      rc = bnxt_qplib_map_db_bar(&rdev->qplib_res);
 +      if (rc)
 +              return rc;
 +
        if (bnxt_qplib_determine_atomics(en_dev->pdev))
                ibdev_info(&rdev->ibdev,
                           "platform doesn't support global atomics.");
@@@ -332,21 -283,15 +332,21 @@@ static void bnxt_re_start_irq(void *han
        for (indx = 0; indx < rdev->num_msix; indx++)
                rdev->en_dev->msix_entries[indx].vector = ent[indx].vector;
  
 -      bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
 -                                false);
 +      rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_ent[BNXT_RE_AEQ_IDX].vector,
 +                                     false);
 +      if (rc) {
 +              ibdev_warn(&rdev->ibdev, "Failed to reinit CREQ\n");
 +              return;
 +      }
        for (indx = BNXT_RE_NQ_IDX ; indx < rdev->num_msix; indx++) {
                nq = &rdev->nq[indx - 1];
                rc = bnxt_qplib_nq_start_irq(nq, indx - 1,
                                             msix_ent[indx].vector, false);
 -              if (rc)
 +              if (rc) {
                        ibdev_warn(&rdev->ibdev, "Failed to reinit NQ index %d\n",
                                   indx - 1);
 +                      return;
 +              }
        }
  }
  
@@@ -370,11 -315,12 +370,11 @@@ static int bnxt_re_register_netdev(stru
        return rc;
  }
  
 -static void bnxt_re_init_hwrm_hdr(struct bnxt_re_dev *rdev, struct input *hdr,
 -                                u16 opcd, u16 crid, u16 trid)
 +static void bnxt_re_init_hwrm_hdr(struct input *hdr, u16 opcd)
  {
        hdr->req_type = cpu_to_le16(opcd);
 -      hdr->cmpl_ring = cpu_to_le16(crid);
 -      hdr->target_id = cpu_to_le16(trid);
 +      hdr->cmpl_ring = cpu_to_le16(-1);
 +      hdr->target_id = cpu_to_le16(-1);
  }
  
  static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
        fw_msg->timeout = timeout;
  }
  
 +/* Query device config using common hwrm */
 +static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
 +                           u32 *offset)
 +{
 +      struct bnxt_en_dev *en_dev = rdev->en_dev;
 +      struct hwrm_func_qcfg_output resp = {0};
 +      struct hwrm_func_qcfg_input req = {0};
 +      struct bnxt_fw_msg fw_msg;
 +      int rc;
 +
 +      memset(&fw_msg, 0, sizeof(fw_msg));
 +      bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
 +      req.fid = cpu_to_le16(0xffff);
 +      bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 +                          sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
 +      rc = bnxt_send_msg(en_dev, &fw_msg);
 +      if (!rc) {
 +              *db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
 +              *offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
 +      }
 +      return rc;
 +}
 +
 +/* Query function capabilities using common hwrm */
 +int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
 +{
 +      struct bnxt_en_dev *en_dev = rdev->en_dev;
 +      struct hwrm_func_qcaps_output resp = {};
 +      struct hwrm_func_qcaps_input req = {};
 +      struct bnxt_qplib_chip_ctx *cctx;
 +      struct bnxt_fw_msg fw_msg = {};
 +      int rc;
 +
 +      cctx = rdev->chip_ctx;
 +      bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
 +      req.fid = cpu_to_le16(0xffff);
 +      bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
 +                          sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
 +
 +      rc = bnxt_send_msg(en_dev, &fw_msg);
 +      if (rc)
 +              return rc;
 +      cctx->modes.db_push = le32_to_cpu(resp.flags) & FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE;
 +
 +      return 0;
 +}
 +
  static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
                                 u16 fw_ring_id, int type)
  {
        struct bnxt_en_dev *en_dev;
 -      struct hwrm_ring_free_input req = {0};
 +      struct hwrm_ring_free_input req = {};
        struct hwrm_ring_free_output resp;
 -      struct bnxt_fw_msg fw_msg;
 +      struct bnxt_fw_msg fw_msg = {};
        int rc = -EINVAL;
  
        if (!rdev)
        if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
                return 0;
  
 -      memset(&fw_msg, 0, sizeof(fw_msg));
 -
 -      bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
 +      bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_FREE);
        req.ring_type = type;
        req.ring_id = cpu_to_le16(fw_ring_id);
        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
@@@ -472,15 -373,16 +472,15 @@@ static int bnxt_re_net_ring_alloc(struc
                                  u16 *fw_ring_id)
  {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
 -      struct hwrm_ring_alloc_input req = {0};
 +      struct hwrm_ring_alloc_input req = {};
        struct hwrm_ring_alloc_output resp;
 -      struct bnxt_fw_msg fw_msg;
 +      struct bnxt_fw_msg fw_msg = {};
        int rc = -EINVAL;
  
        if (!en_dev)
                return rc;
  
 -      memset(&fw_msg, 0, sizeof(fw_msg));
 -      bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
 +      bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
        req.enables = 0;
        req.page_tbl_addr =  cpu_to_le64(ring_attr->dma_arr[0]);
        if (ring_attr->pages > 1) {
@@@ -509,7 -411,7 +509,7 @@@ static int bnxt_re_net_stats_ctx_free(s
        struct bnxt_en_dev *en_dev = rdev->en_dev;
        struct hwrm_stat_ctx_free_input req = {};
        struct hwrm_stat_ctx_free_output resp = {};
 -      struct bnxt_fw_msg fw_msg;
 +      struct bnxt_fw_msg fw_msg = {};
        int rc = -EINVAL;
  
        if (!en_dev)
        if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
                return 0;
  
 -      memset(&fw_msg, 0, sizeof(fw_msg));
 -
 -      bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
 +      bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
        req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
        bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
                            sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
@@@ -535,10 -439,10 +535,10 @@@ static int bnxt_re_net_stats_ctx_alloc(
                                       u32 *fw_stats_ctx_id)
  {
        struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
 -      struct hwrm_stat_ctx_alloc_output resp = {0};
 -      struct hwrm_stat_ctx_alloc_input req = {0};
 +      struct hwrm_stat_ctx_alloc_output resp = {};
 +      struct hwrm_stat_ctx_alloc_input req = {};
        struct bnxt_en_dev *en_dev = rdev->en_dev;
 -      struct bnxt_fw_msg fw_msg;
 +      struct bnxt_fw_msg fw_msg = {};
        int rc = -EINVAL;
  
        *fw_stats_ctx_id = INVALID_STATS_CTX_ID;
        if (!en_dev)
                return rc;
  
 -      memset(&fw_msg, 0, sizeof(fw_msg));
 -
 -      bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
 +      bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
        req.update_period_ms = cpu_to_le32(1000);
        req.stats_dma_addr = cpu_to_le64(dma_map);
        req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
        return rc;
  }
  
 +static void bnxt_re_disassociate_ucontext(struct ib_ucontext *ibcontext)
 +{
 +}
 +
  /* Device */
  
  static struct bnxt_re_dev *bnxt_re_from_netdev(struct net_device *netdev)
@@@ -630,7 -532,6 +630,7 @@@ static const struct ib_device_ops bnxt_
        .destroy_qp = bnxt_re_destroy_qp,
        .destroy_srq = bnxt_re_destroy_srq,
        .device_group = &bnxt_re_dev_attr_group,
 +      .disassociate_ucontext = bnxt_re_disassociate_ucontext,
        .get_dev_fw_str = bnxt_re_query_fw_str,
        .get_dma_mr = bnxt_re_get_dma_mr,
        .get_hw_stats = bnxt_re_ib_get_hw_stats,
        .get_port_immutable = bnxt_re_get_port_immutable,
        .map_mr_sg = bnxt_re_map_mr_sg,
        .mmap = bnxt_re_mmap,
 +      .mmap_free = bnxt_re_mmap_free,
        .modify_qp = bnxt_re_modify_qp,
        .modify_srq = bnxt_re_modify_srq,
        .poll_cq = bnxt_re_poll_cq,
@@@ -679,9 -579,6 +679,9 @@@ static int bnxt_re_register_ib(struct b
        ibdev->dev.parent = &rdev->en_dev->pdev->dev;
        ibdev->local_dma_lkey = BNXT_QPLIB_RSVD_LKEY;
  
 +      if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
 +              ibdev->driver_def = bnxt_re_uapi_defs;
 +
        ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
        ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
        if (ret)
@@@ -925,6 -822,7 +925,6 @@@ static void bnxt_re_free_res(struct bnx
  
        if (rdev->qplib_res.dpi_tbl.max) {
                bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
 -                                     &rdev->qplib_res.dpi_tbl,
                                       &rdev->dpi_privileged);
        }
        if (rdev->qplib_res.rcfw) {
@@@ -952,9 -850,9 +952,9 @@@ static int bnxt_re_alloc_res(struct bnx
        if (rc)
                goto fail;
  
 -      rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl,
 +      rc = bnxt_qplib_alloc_dpi(&rdev->qplib_res,
                                  &rdev->dpi_privileged,
 -                                rdev);
 +                                rdev, BNXT_QPLIB_DPI_TYPE_KERNEL);
        if (rc)
                goto dealloc_res;
  
@@@ -994,6 -892,7 +994,6 @@@ free_nq
                bnxt_qplib_free_nq(&rdev->nq[i]);
        }
        bnxt_qplib_dealloc_dpi(&rdev->qplib_res,
 -                             &rdev->qplib_res.dpi_tbl,
                               &rdev->dpi_privileged);
  dealloc_res:
        bnxt_qplib_free_res(&rdev->qplib_res);
@@@ -1064,6 -963,12 +1064,6 @@@ static int bnxt_re_update_gid(struct bn
        if (!ib_device_try_get(&rdev->ibdev))
                return 0;
  
 -      if (!sgid_tbl) {
 -              ibdev_err(&rdev->ibdev, "QPLIB: SGID table not allocated");
 -              rc = -EINVAL;
 -              goto out;
 -      }
 -
        for (index = 0; index < sgid_tbl->active; index++) {
                gid_idx = sgid_tbl->hw_id[index];
  
                rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
                                            rdev->qplib_res.netdev->dev_addr);
        }
 -out:
 +
        ib_device_put(&rdev->ibdev);
        return rc;
  }
@@@ -1134,13 -1039,14 +1134,13 @@@ static int bnxt_re_setup_qos(struct bnx
  static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
  {
        struct bnxt_en_dev *en_dev = rdev->en_dev;
 -      struct hwrm_ver_get_output resp = {0};
 -      struct hwrm_ver_get_input req = {0};
 -      struct bnxt_fw_msg fw_msg;
 +      struct hwrm_ver_get_output resp = {};
 +      struct hwrm_ver_get_input req = {};
 +      struct bnxt_qplib_chip_ctx *cctx;
 +      struct bnxt_fw_msg fw_msg = {};
        int rc = 0;
  
 -      memset(&fw_msg, 0, sizeof(fw_msg));
 -      bnxt_re_init_hwrm_hdr(rdev, (void *)&req,
 -                            HWRM_VER_GET, -1, -1);
 +      bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VER_GET);
        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
                          rc);
                return;
        }
 -      rdev->qplib_ctx.hwrm_intf_ver =
 +
 +      cctx = rdev->chip_ctx;
 +      cctx->hwrm_intf_ver =
                (u64)le16_to_cpu(resp.hwrm_intf_major) << 48 |
                (u64)le16_to_cpu(resp.hwrm_intf_minor) << 32 |
                (u64)le16_to_cpu(resp.hwrm_intf_build) << 16 |
                le16_to_cpu(resp.hwrm_intf_patch);
 +
 +      cctx->hwrm_cmd_max_timeout = le16_to_cpu(resp.max_req_timeout);
 +
 +      if (!cctx->hwrm_cmd_max_timeout)
 +              cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
  }
  
  static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
                return rc;
        }
        dev_info(rdev_to_dev(rdev), "Device registered with IB successfully");
-       ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
-                        &rdev->active_width);
        set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);
  
        event = netif_running(rdev->netdev) && netif_carrier_ok(rdev->netdev) ?
@@@ -1303,7 -1200,7 +1301,7 @@@ static int bnxt_re_dev_init(struct bnxt
        db_offt = bnxt_re_get_nqdb_offset(rdev, BNXT_RE_AEQ_IDX);
        vid = rdev->en_dev->msix_entries[BNXT_RE_AEQ_IDX].vector;
        rc = bnxt_qplib_enable_rcfw_channel(&rdev->rcfw,
 -                                          vid, db_offt, rdev->is_virtfn,
 +                                          vid, db_offt,
                                            &bnxt_re_aeq_handler);
        if (rc) {
                ibdev_err(&rdev->ibdev, "Failed to enable RCFW channel: %#x\n",
@@@ -1437,6 -1334,10 +1435,10 @@@ static void bnxt_re_setup_cc(struct bnx
  {
        struct bnxt_qplib_cc_param cc_param = {};
  
+       /* Do not enable congestion control on VFs */
+       if (rdev->is_virtfn)
+               return;
        /* Currently enabling only for GenP5 adapters */
        if (!bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
                return;
@@@ -1596,7 -1497,6 +1598,7 @@@ static int bnxt_re_suspend(struct auxil
         */
        set_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags);
        set_bit(ERR_DEVICE_DETACHED, &rdev->rcfw.cmdq.flags);
 +      wake_up_all(&rdev->rcfw.cmdq.waitq);
        mutex_unlock(&bnxt_re_mutex);
  
        return 0;
diff --combined drivers/infiniband/hw/bnxt_re/qplib_fp.c
index d5d418a8b003a6372b3159e29ca1f72529ccb91b,8974f6235cfaa83a9e658faf08e2085a23738f4e..91aed77ce40d5f91f4622588ba691c100bc520bc
@@@ -399,9 -399,6 +399,9 @@@ static irqreturn_t bnxt_qplib_nq_irq(in
  
  void bnxt_qplib_nq_stop_irq(struct bnxt_qplib_nq *nq, bool kill)
  {
 +      if (!nq->requested)
 +              return;
 +
        tasklet_disable(&nq->nq_tasklet);
        /* Mask h/w interrupt */
        bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, false);
        synchronize_irq(nq->msix_vec);
        if (kill)
                tasklet_kill(&nq->nq_tasklet);
 -      if (nq->requested) {
 -              irq_set_affinity_hint(nq->msix_vec, NULL);
 -              free_irq(nq->msix_vec, nq);
 -              nq->requested = false;
 -      }
 +
 +      irq_set_affinity_hint(nq->msix_vec, NULL);
 +      free_irq(nq->msix_vec, nq);
 +      kfree(nq->name);
 +      nq->name = NULL;
 +      nq->requested = false;
  }
  
  void bnxt_qplib_disable_nq(struct bnxt_qplib_nq *nq)
  int bnxt_qplib_nq_start_irq(struct bnxt_qplib_nq *nq, int nq_indx,
                            int msix_vector, bool need_init)
  {
 +      struct bnxt_qplib_res *res = nq->res;
        int rc;
  
        if (nq->requested)
        else
                tasklet_enable(&nq->nq_tasklet);
  
 -      snprintf(nq->name, sizeof(nq->name), "bnxt_qplib_nq-%d", nq_indx);
 +      nq->name = kasprintf(GFP_KERNEL, "bnxt_re-nq-%d@pci:%s",
 +                           nq_indx, pci_name(res->pdev));
 +      if (!nq->name)
 +              return -ENOMEM;
        rc = request_irq(nq->msix_vec, bnxt_qplib_nq_irq, 0, nq->name, nq);
 -      if (rc)
 +      if (rc) {
 +              kfree(nq->name);
 +              nq->name = NULL;
 +              tasklet_disable(&nq->nq_tasklet);
                return rc;
 +      }
  
        cpumask_clear(&nq->mask);
        cpumask_set_cpu(nq_indx, &nq->mask);
                         nq->msix_vec, nq_indx);
        }
        nq->requested = true;
 -      bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, nq->res->cctx, true);
 +      bnxt_qplib_ring_nq_db(&nq->nq_db.dbinfo, res->cctx, true);
  
        return rc;
  }
@@@ -483,6 -471,7 +483,6 @@@ static int bnxt_qplib_map_nq_db(struct 
        resource_size_t reg_base;
        struct bnxt_qplib_nq_db *nq_db;
        struct pci_dev *pdev;
 -      int rc = 0;
  
        pdev = nq->pdev;
        nq_db = &nq->nq_db;
        if (!nq_db->reg.bar_base) {
                dev_err(&pdev->dev, "QPLIB: NQ BAR region %d resc start is 0!",
                        nq_db->reg.bar_id);
 -              rc = -ENOMEM;
 -              goto fail;
 +              return -ENOMEM;
        }
  
        reg_base = nq_db->reg.bar_base + reg_offt;
        if (!nq_db->reg.bar_reg) {
                dev_err(&pdev->dev, "QPLIB: NQ BAR region %d mapping failed",
                        nq_db->reg.bar_id);
 -              rc = -ENOMEM;
 -              goto fail;
 +              return -ENOMEM;
        }
  
        nq_db->dbinfo.db = nq_db->reg.bar_reg;
        nq_db->dbinfo.hwq = &nq->hwq;
        nq_db->dbinfo.xid = nq->ring_id;
 -fail:
 -      return rc;
 +
 +      return 0;
  }
  
  int bnxt_qplib_enable_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq,
@@@ -623,7 -614,7 +623,7 @@@ int bnxt_qplib_create_srq(struct bnxt_q
        hwq_attr.type = HWQ_TYPE_QUEUE;
        rc = bnxt_qplib_alloc_init_hwq(&srq->hwq, &hwq_attr);
        if (rc)
 -              goto exit;
 +              return rc;
  
        srq->swq = kcalloc(srq->hwq.max_elements, sizeof(*srq->swq),
                           GFP_KERNEL);
        srq->dbinfo.xid = srq->id;
        srq->dbinfo.db = srq->dpi->dbr;
        srq->dbinfo.max_slot = 1;
 -      srq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
 +      srq->dbinfo.priv_db = res->dpi_tbl.priv_db;
        if (srq->threshold)
                bnxt_qplib_armen_db(&srq->dbinfo, DBC_DBC_TYPE_SRQ_ARMENA);
        srq->arm_req = false;
  fail:
        bnxt_qplib_free_hwq(res, &srq->hwq);
        kfree(srq->swq);
 -exit:
 +
        return rc;
  }
  
@@@ -741,14 -732,15 +741,14 @@@ int bnxt_qplib_post_srq_recv(struct bnx
        struct rq_wqe *srqe;
        struct sq_sge *hw_sge;
        u32 sw_prod, sw_cons, count = 0;
 -      int i, rc = 0, next;
 +      int i, next;
  
        spin_lock(&srq_hwq->lock);
        if (srq->start_idx == srq->last_idx) {
                dev_err(&srq_hwq->pdev->dev,
                        "FP: SRQ (0x%x) is full!\n", srq->id);
 -              rc = -EINVAL;
                spin_unlock(&srq_hwq->lock);
 -              goto done;
 +              return -EINVAL;
        }
        next = srq->start_idx;
        srq->start_idx = srq->swq[next].next_idx;
                srq->arm_req = false;
                bnxt_qplib_srq_arm_db(&srq->dbinfo, srq->threshold);
        }
 -done:
 -      return rc;
 +
 +      return 0;
  }
  
  /* QP */
  
  static int bnxt_qplib_alloc_init_swq(struct bnxt_qplib_q *que)
  {
 -      int rc = 0;
        int indx;
  
        que->swq = kcalloc(que->max_wqe, sizeof(*que->swq), GFP_KERNEL);
 -      if (!que->swq) {
 -              rc = -ENOMEM;
 -              goto out;
 -      }
 +      if (!que->swq)
 +              return -ENOMEM;
  
        que->swq_start = 0;
        que->swq_last = que->max_wqe - 1;
                que->swq[indx].next_idx = indx + 1;
        que->swq[que->swq_last].next_idx = 0; /* Make it circular */
        que->swq_last = 0;
 -out:
 -      return rc;
 +
 +      return 0;
  }
  
  int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
        hwq_attr.type = HWQ_TYPE_QUEUE;
        rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
        if (rc)
 -              goto exit;
 +              return rc;
  
        rc = bnxt_qplib_alloc_init_swq(sq);
        if (rc)
@@@ -932,6 -927,7 +932,6 @@@ sq_swq
        kfree(sq->swq);
  fail_sq:
        bnxt_qplib_free_hwq(res, &sq->hwq);
 -exit:
        return rc;
  }
  
@@@ -996,7 -992,7 +996,7 @@@ int bnxt_qplib_create_qp(struct bnxt_qp
        hwq_attr.type = HWQ_TYPE_QUEUE;
        rc = bnxt_qplib_alloc_init_hwq(&sq->hwq, &hwq_attr);
        if (rc)
 -              goto exit;
 +              return rc;
  
        rc = bnxt_qplib_alloc_init_swq(sq);
        if (rc)
@@@ -1144,6 -1140,7 +1144,6 @@@ sq_swq
        kfree(sq->swq);
  fail_sq:
        bnxt_qplib_free_hwq(res, &sq->hwq);
 -exit:
        return rc;
  }
  
@@@ -1617,7 -1614,7 +1617,7 @@@ static int bnxt_qplib_put_inline(struc
                il_src = (void *)wqe->sg_list[indx].addr;
                t_len += len;
                if (t_len > qp->max_inline_data)
 -                      goto bad;
 +                      return -ENOMEM;
                while (len) {
                        if (pull_dst) {
                                pull_dst = false;
        }
  
        return t_len;
 -bad:
 -      return -ENOMEM;
  }
  
  static u32 bnxt_qplib_put_sges(struct bnxt_qplib_hwq *hwq,
@@@ -2057,6 -2056,12 +2057,12 @@@ int bnxt_qplib_create_cq(struct bnxt_qp
        u32 pg_sz_lvl;
        int rc;
  
+       if (!cq->dpi) {
+               dev_err(&rcfw->pdev->dev,
+                       "FP: CREATE_CQ failed due to NULL DPI\n");
+               return -EINVAL;
+       }
        hwq_attr.res = res;
        hwq_attr.depth = cq->max_wqe;
        hwq_attr.stride = sizeof(struct cq_base);
        hwq_attr.sginfo = &cq->sg_info;
        rc = bnxt_qplib_alloc_init_hwq(&cq->hwq, &hwq_attr);
        if (rc)
 -              goto exit;
 +              return rc;
  
        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
                                 CMDQ_BASE_OPCODE_CREATE_CQ,
                                 sizeof(req));
  
-       if (!cq->dpi) {
-               dev_err(&rcfw->pdev->dev,
-                       "FP: CREATE_CQ failed due to NULL DPI\n");
-               return -EINVAL;
-       }
        req.dpi = cpu_to_le32(cq->dpi->dpi);
        req.cq_handle = cpu_to_le64(cq->cq_handle);
        req.cq_size = cpu_to_le32(cq->hwq.max_elements);
        cq->dbinfo.hwq = &cq->hwq;
        cq->dbinfo.xid = cq->id;
        cq->dbinfo.db = cq->dpi->dbr;
 -      cq->dbinfo.priv_db = res->dpi_tbl.dbr_bar_reg_iomem;
 +      cq->dbinfo.priv_db = res->dpi_tbl.priv_db;
  
        bnxt_qplib_armen_db(&cq->dbinfo, DBC_DBC_TYPE_CQ_ARMENA);
  
  
  fail:
        bnxt_qplib_free_hwq(res, &cq->hwq);
 -exit:
        return rc;
  }
  
@@@ -2504,6 -2505,7 +2505,6 @@@ static int bnxt_qplib_cq_process_res_rc
        struct bnxt_qplib_qp *qp;
        struct bnxt_qplib_q *rq;
        u32 wr_id_idx;
 -      int rc = 0;
  
        qp = (struct bnxt_qplib_qp *)((unsigned long)
                                      le64_to_cpu(hwcqe->qp_handle));
        if (qp->rq.flushed) {
                dev_dbg(&cq->hwq.pdev->dev,
                        "%s: QP in Flush QP = %p\n", __func__, qp);
 -              goto done;
 +              return 0;
        }
  
        cqe = *pcqe;
                }
        }
  
 -done:
 -      return rc;
 +      return 0;
  }
  
  static int bnxt_qplib_cq_process_res_ud(struct bnxt_qplib_cq *cq,
        struct bnxt_qplib_qp *qp;
        struct bnxt_qplib_q *rq;
        u32 wr_id_idx;
 -      int rc = 0;
  
        qp = (struct bnxt_qplib_qp *)((unsigned long)
                                      le64_to_cpu(hwcqe->qp_handle));
        if (qp->rq.flushed) {
                dev_dbg(&cq->hwq.pdev->dev,
                        "%s: QP in Flush QP = %p\n", __func__, qp);
 -              goto done;
 +              return 0;
        }
        cqe = *pcqe;
        cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
                        bnxt_qplib_add_flush_qp(qp);
                }
        }
 -done:
 -      return rc;
 +
 +      return 0;
  }
  
  bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq)
@@@ -2683,6 -2687,7 +2684,6 @@@ static int bnxt_qplib_cq_process_res_ra
        struct bnxt_qplib_srq *srq;
        struct bnxt_qplib_cqe *cqe;
        u32 wr_id_idx;
 -      int rc = 0;
  
        qp = (struct bnxt_qplib_qp *)((unsigned long)
                                      le64_to_cpu(hwcqe->qp_handle));
        if (qp->rq.flushed) {
                dev_dbg(&cq->hwq.pdev->dev,
                        "%s: QP in Flush QP = %p\n", __func__, qp);
 -              goto done;
 +              return 0;
        }
        cqe = *pcqe;
        cqe->opcode = hwcqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
                }
        }
  
 -done:
 -      return rc;
 +      return 0;
  }
  
  static int bnxt_qplib_cq_process_terminal(struct bnxt_qplib_cq *cq,
  
        qp = (struct bnxt_qplib_qp *)((unsigned long)
                                      le64_to_cpu(hwcqe->qp_handle));
 -      if (!qp) {
 -              dev_err(&cq->hwq.pdev->dev,
 -                      "FP: CQ Process terminal qp is NULL\n");
 +      if (!qp)
                return -EINVAL;
 -      }
  
        /* Must block new posting of SQ and RQ */
        qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
diff --combined drivers/infiniband/hw/bnxt_re/qplib_res.c
index 7674136c08b25afd19dfc6532e72b4b883c25dd0,81b0c5e879f9e857a77875dd104530eb4edaee91..5fd8f7c90bb06a8b734f568c1c24c06a70457282
@@@ -215,17 -215,9 +215,9 @@@ int bnxt_qplib_alloc_init_hwq(struct bn
                        return -EINVAL;
                hwq_attr->sginfo->npages = npages;
        } else {
-               unsigned long sginfo_num_pages = ib_umem_num_dma_blocks(
-                       hwq_attr->sginfo->umem, hwq_attr->sginfo->pgsize);
+               npages = ib_umem_num_dma_blocks(hwq_attr->sginfo->umem,
+                                               hwq_attr->sginfo->pgsize);
                hwq->is_user = true;
-               npages = sginfo_num_pages;
-               npages = (npages * PAGE_SIZE) /
-                         BIT_ULL(hwq_attr->sginfo->pgshft);
-               if ((sginfo_num_pages * PAGE_SIZE) %
-                    BIT_ULL(hwq_attr->sginfo->pgshft))
-                       if (!npages)
-                               npages++;
        }
  
        if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
@@@ -704,76 -696,44 +696,76 @@@ static int bnxt_qplib_alloc_pd_tbl(stru
  }
  
  /* DPIs */
 -int bnxt_qplib_alloc_dpi(struct bnxt_qplib_dpi_tbl *dpit,
 -                       struct bnxt_qplib_dpi     *dpi,
 -                       void                      *app)
 +int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
 +                       struct bnxt_qplib_dpi *dpi,
 +                       void *app, u8 type)
  {
 +      struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
 +      struct bnxt_qplib_reg_desc *reg;
        u32 bit_num;
 +      u64 umaddr;
 +
 +      reg = &dpit->wcreg;
 +      mutex_lock(&res->dpi_tbl_lock);
  
        bit_num = find_first_bit(dpit->tbl, dpit->max);
 -      if (bit_num == dpit->max)
 +      if (bit_num == dpit->max) {
 +              mutex_unlock(&res->dpi_tbl_lock);
                return -ENOMEM;
 +      }
  
        /* Found unused DPI */
        clear_bit(bit_num, dpit->tbl);
        dpit->app_tbl[bit_num] = app;
  
 -      dpi->dpi = bit_num;
 -      dpi->dbr = dpit->dbr_bar_reg_iomem + (bit_num * PAGE_SIZE);
 -      dpi->umdbr = dpit->unmapped_dbr + (bit_num * PAGE_SIZE);
 +      dpi->bit = bit_num;
 +      dpi->dpi = bit_num + (reg->offset - dpit->ucreg.offset) / PAGE_SIZE;
 +
 +      umaddr = reg->bar_base + reg->offset + bit_num * PAGE_SIZE;
 +      dpi->umdbr = umaddr;
 +
 +      switch (type) {
 +      case BNXT_QPLIB_DPI_TYPE_KERNEL:
 +              /* privileged dbr was already mapped just initialize it. */
 +              dpi->umdbr = dpit->ucreg.bar_base +
 +                           dpit->ucreg.offset + bit_num * PAGE_SIZE;
 +              dpi->dbr = dpit->priv_db;
 +              dpi->dpi = dpi->bit;
 +              break;
 +      case BNXT_QPLIB_DPI_TYPE_WC:
 +              dpi->dbr = ioremap_wc(umaddr, PAGE_SIZE);
 +              break;
 +      default:
 +              dpi->dbr = ioremap(umaddr, PAGE_SIZE);
 +              break;
 +      }
  
 +      dpi->type = type;
 +      mutex_unlock(&res->dpi_tbl_lock);
        return 0;
 +
  }
  
  int bnxt_qplib_dealloc_dpi(struct bnxt_qplib_res *res,
 -                         struct bnxt_qplib_dpi_tbl *dpit,
 -                         struct bnxt_qplib_dpi     *dpi)
 +                         struct bnxt_qplib_dpi *dpi)
  {
 -      if (dpi->dpi >= dpit->max) {
 -              dev_warn(&res->pdev->dev, "Invalid DPI? dpi = %d\n", dpi->dpi);
 -              return -EINVAL;
 -      }
 -      if (test_and_set_bit(dpi->dpi, dpit->tbl)) {
 -              dev_warn(&res->pdev->dev, "Freeing an unused DPI? dpi = %d\n",
 -                       dpi->dpi);
 +      struct bnxt_qplib_dpi_tbl *dpit = &res->dpi_tbl;
 +
 +      mutex_lock(&res->dpi_tbl_lock);
 +      if (dpi->dpi && dpi->type != BNXT_QPLIB_DPI_TYPE_KERNEL)
 +              pci_iounmap(res->pdev, dpi->dbr);
 +
 +      if (test_and_set_bit(dpi->bit, dpit->tbl)) {
 +              dev_warn(&res->pdev->dev,
 +                       "Freeing an unused DPI? dpi = %d, bit = %d\n",
 +                              dpi->dpi, dpi->bit);
 +              mutex_unlock(&res->dpi_tbl_lock);
                return -EINVAL;
        }
        if (dpit->app_tbl)
 -              dpit->app_tbl[dpi->dpi] = NULL;
 +              dpit->app_tbl[dpi->bit] = NULL;
        memset(dpi, 0, sizeof(*dpi));
 -
 +      mutex_unlock(&res->dpi_tbl_lock);
        return 0;
  }
  
@@@ -782,38 -742,52 +774,38 @@@ static void bnxt_qplib_free_dpi_tbl(str
  {
        kfree(dpit->tbl);
        kfree(dpit->app_tbl);
 -      if (dpit->dbr_bar_reg_iomem)
 -              pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
 -      memset(dpit, 0, sizeof(*dpit));
 +      dpit->tbl = NULL;
 +      dpit->app_tbl = NULL;
 +      dpit->max = 0;
  }
  
 -static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res     *res,
 -                                  struct bnxt_qplib_dpi_tbl *dpit,
 -                                  u32                       dbr_offset)
 +static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
 +                                  struct bnxt_qplib_dev_attr *dev_attr)
  {
 -      u32 dbr_bar_reg = RCFW_DBR_PCI_BAR_REGION;
 -      resource_size_t bar_reg_base;
 -      u32 dbr_len, bytes;
 -
 -      if (dpit->dbr_bar_reg_iomem) {
 -              dev_err(&res->pdev->dev, "DBR BAR region %d already mapped\n",
 -                      dbr_bar_reg);
 -              return -EALREADY;
 -      }
 -
 -      bar_reg_base = pci_resource_start(res->pdev, dbr_bar_reg);
 -      if (!bar_reg_base) {
 -              dev_err(&res->pdev->dev, "BAR region %d resc start failed\n",
 -                      dbr_bar_reg);
 -              return -ENOMEM;
 -      }
 +      struct bnxt_qplib_dpi_tbl *dpit;
 +      struct bnxt_qplib_reg_desc *reg;
 +      unsigned long bar_len;
 +      u32 dbr_offset;
 +      u32 bytes;
  
 -      dbr_len = pci_resource_len(res->pdev, dbr_bar_reg) - dbr_offset;
 -      if (!dbr_len || ((dbr_len & (PAGE_SIZE - 1)) != 0)) {
 -              dev_err(&res->pdev->dev, "Invalid DBR length %d\n", dbr_len);
 -              return -ENOMEM;
 -      }
 +      dpit = &res->dpi_tbl;
 +      reg = &dpit->wcreg;
  
 -      dpit->dbr_bar_reg_iomem = ioremap(bar_reg_base + dbr_offset,
 -                                                dbr_len);
 -      if (!dpit->dbr_bar_reg_iomem) {
 -              dev_err(&res->pdev->dev,
 -                      "FP: DBR BAR region %d mapping failed\n", dbr_bar_reg);
 -              return -ENOMEM;
 +      if (!bnxt_qplib_is_chip_gen_p5(res->cctx)) {
 +              /* Offest should come from L2 driver */
 +              dbr_offset = dev_attr->l2_db_size;
 +              dpit->ucreg.offset = dbr_offset;
 +              dpit->wcreg.offset = dbr_offset;
        }
  
 -      dpit->unmapped_dbr = bar_reg_base + dbr_offset;
 -      dpit->max = dbr_len / PAGE_SIZE;
 +      bar_len = pci_resource_len(res->pdev, reg->bar_id);
 +      dpit->max = (bar_len - reg->offset) / PAGE_SIZE;
 +      if (dev_attr->max_dpi)
 +              dpit->max = min_t(u32, dpit->max, dev_attr->max_dpi);
  
 -      dpit->app_tbl = kcalloc(dpit->max, sizeof(void *), GFP_KERNEL);
 +      dpit->app_tbl = kcalloc(dpit->max,  sizeof(void *), GFP_KERNEL);
        if (!dpit->app_tbl)
 -              goto unmap_io;
 +              return -ENOMEM;
  
        bytes = dpit->max >> 3;
        if (!bytes)
        if (!dpit->tbl) {
                kfree(dpit->app_tbl);
                dpit->app_tbl = NULL;
 -              goto unmap_io;
 +              return -ENOMEM;
        }
  
        memset((u8 *)dpit->tbl, 0xFF, bytes);
 +      dpit->priv_db = dpit->ucreg.bar_reg + dpit->ucreg.offset;
  
        return 0;
  
 -unmap_io:
 -      pci_iounmap(res->pdev, dpit->dbr_bar_reg_iomem);
 -      dpit->dbr_bar_reg_iomem = NULL;
 -      return -ENOMEM;
  }
  
  /* Stats */
@@@ -897,7 -874,7 +889,7 @@@ int bnxt_qplib_alloc_res(struct bnxt_qp
        if (rc)
                goto fail;
  
 -      rc = bnxt_qplib_alloc_dpi_tbl(res, &res->dpi_tbl, dev_attr->l2_db_size);
 +      rc = bnxt_qplib_alloc_dpi_tbl(res, dev_attr);
        if (rc)
                goto fail;
  
@@@ -907,46 -884,6 +899,46 @@@ fail
        return rc;
  }
  
 +void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res)
 +{
 +      struct bnxt_qplib_reg_desc *reg;
 +
 +      reg = &res->dpi_tbl.ucreg;
 +      if (reg->bar_reg)
 +              pci_iounmap(res->pdev, reg->bar_reg);
 +      reg->bar_reg = NULL;
 +      reg->bar_base = 0;
 +      reg->len = 0;
 +      reg->bar_id = 0;
 +}
 +
 +int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
 +{
 +      struct bnxt_qplib_reg_desc *ucreg;
 +      struct bnxt_qplib_reg_desc *wcreg;
 +
 +      wcreg = &res->dpi_tbl.wcreg;
 +      wcreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
 +      wcreg->bar_base = pci_resource_start(res->pdev, wcreg->bar_id);
 +
 +      ucreg = &res->dpi_tbl.ucreg;
 +      ucreg->bar_id = RCFW_DBR_PCI_BAR_REGION;
 +      ucreg->bar_base = pci_resource_start(res->pdev, ucreg->bar_id);
 +      ucreg->len = ucreg->offset + PAGE_SIZE;
 +      if (!ucreg->len || ((ucreg->len & (PAGE_SIZE - 1)) != 0)) {
 +              dev_err(&res->pdev->dev, "QPLIB: invalid dbr length %d",
 +                      (int)ucreg->len);
 +              return -EINVAL;
 +      }
 +      ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
 +      if (!ucreg->bar_reg) {
 +              dev_err(&res->pdev->dev, "privileged dpi map failed!");
 +              return -ENOMEM;
 +      }
 +
 +      return 0;
 +}
 +
  int bnxt_qplib_determine_atomics(struct pci_dev *dev)
  {
        int comp;
diff --combined drivers/infiniband/hw/bnxt_re/qplib_sp.c
index d5ad0861c537fe2a947cd173e7d15f52cee87fc5,b967a17a44bebcc282547488eb9209781dda3450..ab45f9d4bb02fecfb448136875a6addf0286a12d
@@@ -170,9 -170,6 +170,9 @@@ int bnxt_qplib_get_dev_attr(struct bnxt
                attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc);
        }
  
 +      if (rcfw->res->cctx->hwrm_intf_ver >= HWRM_VERSION_DEV_ATTR_MAX_DPI)
 +              attr->max_dpi = le32_to_cpu(sb->max_dpi);
 +
        attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw);
  bail:
        bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
@@@ -236,6 -233,10 +236,6 @@@ int bnxt_qplib_del_sgid(struct bnxt_qpl
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        int index;
  
 -      if (!sgid_tbl) {
 -              dev_err(&res->pdev->dev, "SGID table not allocated\n");
 -              return -EINVAL;
 -      }
        /* Do we need a sgid_lock here? */
        if (!sgid_tbl->active) {
                dev_err(&res->pdev->dev, "SGID table has no active entries\n");
@@@ -296,6 -297,10 +296,6 @@@ int bnxt_qplib_add_sgid(struct bnxt_qpl
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        int i, free_idx;
  
 -      if (!sgid_tbl) {
 -              dev_err(&res->pdev->dev, "SGID table not allocated\n");
 -              return -EINVAL;
 -      }
        /* Do we need a sgid_lock here? */
        if (sgid_tbl->active == sgid_tbl->max) {
                dev_err(&res->pdev->dev, "SGID table is full\n");
@@@ -463,14 -468,13 +463,14 @@@ int bnxt_qplib_create_ah(struct bnxt_qp
        return 0;
  }
  
 -void bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
 -                         bool block)
 +int bnxt_qplib_destroy_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
 +                        bool block)
  {
        struct bnxt_qplib_rcfw *rcfw = res->rcfw;
        struct creq_destroy_ah_resp resp = {};
        struct bnxt_qplib_cmdqmsg msg = {};
        struct cmdq_destroy_ah req = {};
 +      int rc;
  
        /* Clean up the AH table in the device */
        bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
  
        bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
                                sizeof(resp), block);
 -      bnxt_qplib_rcfw_send_message(rcfw, &msg);
 +      rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 +      return rc;
  }
  
  /* MRW */
@@@ -614,16 -617,15 +614,15 @@@ int bnxt_qplib_reg_mr(struct bnxt_qplib
                /* Free the hwq if it already exist, must be a rereg */
                if (mr->hwq.max_elements)
                        bnxt_qplib_free_hwq(res, &mr->hwq);
-               /* Use system PAGE_SIZE */
                hwq_attr.res = res;
                hwq_attr.depth = pages;
-               hwq_attr.stride = buf_pg_size;
+               hwq_attr.stride = sizeof(dma_addr_t);
                hwq_attr.type = HWQ_TYPE_MR;
                hwq_attr.sginfo = &sginfo;
                hwq_attr.sginfo->umem = umem;
                hwq_attr.sginfo->npages = pages;
-               hwq_attr.sginfo->pgsize = PAGE_SIZE;
-               hwq_attr.sginfo->pgshft = PAGE_SHIFT;
+               hwq_attr.sginfo->pgsize = buf_pg_size;
+               hwq_attr.sginfo->pgshft = ilog2(buf_pg_size);
                rc = bnxt_qplib_alloc_init_hwq(&mr->hwq, &hwq_attr);
                if (rc) {
                        dev_err(&res->pdev->dev,
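
In the qplib_sp.c hunk just above, bnxt_qplib_reg_mr() switches the MR's hardware queue from the system PAGE_SIZE to the page size already chosen for the umem (buf_pg_size), deriving the shift with ilog2() and using a stride of sizeof(dma_addr_t) for the PBL entries. The short example below only illustrates that page accounting; the 2 MB "best page size" and the region length are invented numbers, and uint64_t stands in for dma_addr_t.

#include <stdint.h>
#include <stdio.h>

static unsigned int ilog2_u64(uint64_t v)       /* v must be a power of two */
{
        unsigned int shift = 0;

        while (v >>= 1)
                shift++;
        return shift;
}

int main(void)
{
        uint64_t buf_pg_size = 2ull << 20;      /* assumed 2 MB best page size */
        uint64_t length = 64ull << 20;          /* assumed 64 MB region        */
        uint64_t pages = (length + buf_pg_size - 1) / buf_pg_size;

        printf("pgshft = %u, pages = %llu, pbl bytes = %llu\n",
               ilog2_u64(buf_pg_size), (unsigned long long)pages,
               (unsigned long long)(pages * sizeof(uint64_t)));
        return 0;
}
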
index eba5fdc107036238c821a1996704e9948ac4a9a1,d4c6b9bc0a4ea222e7d4bff78ab3010d452a049c..8f7eb11066b43e8a12f9db59cbb5dafe6a6de7ca
@@@ -373,10 -373,17 +373,10 @@@ static int check_send_valid(struct hns_
                            struct hns_roce_qp *hr_qp)
  {
        struct ib_device *ibdev = &hr_dev->ib_dev;
 -      struct ib_qp *ibqp = &hr_qp->ibqp;
 -
 -      if (unlikely(ibqp->qp_type != IB_QPT_RC &&
 -                   ibqp->qp_type != IB_QPT_GSI &&
 -                   ibqp->qp_type != IB_QPT_UD)) {
 -              ibdev_err(ibdev, "not supported QP(0x%x)type!\n",
 -                        ibqp->qp_type);
 -              return -EOPNOTSUPP;
 -      } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
 -                 hr_qp->state == IB_QPS_INIT ||
 -                 hr_qp->state == IB_QPS_RTR)) {
 +
 +      if (unlikely(hr_qp->state == IB_QPS_RESET ||
 +                   hr_qp->state == IB_QPS_INIT ||
 +                   hr_qp->state == IB_QPS_RTR)) {
                ibdev_err(ibdev, "failed to post WQE, QP state %u!\n",
                          hr_qp->state);
                return -EINVAL;
@@@ -764,6 -771,17 +764,6 @@@ out
  static int check_recv_valid(struct hns_roce_dev *hr_dev,
                            struct hns_roce_qp *hr_qp)
  {
 -      struct ib_device *ibdev = &hr_dev->ib_dev;
 -      struct ib_qp *ibqp = &hr_qp->ibqp;
 -
 -      if (unlikely(ibqp->qp_type != IB_QPT_RC &&
 -                   ibqp->qp_type != IB_QPT_GSI &&
 -                   ibqp->qp_type != IB_QPT_UD)) {
 -              ibdev_err(ibdev, "unsupported qp type, qp_type = %d.\n",
 -                        ibqp->qp_type);
 -              return -EOPNOTSUPP;
 -      }
 -
        if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN))
                return -EIO;
  
@@@ -4565,11 -4583,9 +4565,9 @@@ static int modify_qp_init_to_rtr(struc
        mtu = ib_mtu_enum_to_int(ib_mtu);
        if (WARN_ON(mtu <= 0))
                return -EINVAL;
- #define MAX_LP_MSG_LEN 16384
-       /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
-       lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
-       if (WARN_ON(lp_pktn_ini >= 0xF))
-               return -EINVAL;
+ #define MIN_LP_MSG_LEN 1024
+       /* mtu * (2 ^ lp_pktn_ini) should be in the range of 1024 to mtu */
+       lp_pktn_ini = ilog2(max(mtu, MIN_LP_MSG_LEN) / mtu);
  
        if (attr_mask & IB_QP_PATH_MTU) {
                hr_reg_write(context, QPC_MTU, ib_mtu);
@@@ -4994,7 -5010,6 +4992,6 @@@ static int hns_roce_v2_set_abs_fields(s
  static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout)
  {
  #define QP_ACK_TIMEOUT_MAX_HIP08 20
- #define QP_ACK_TIMEOUT_OFFSET 10
  #define QP_ACK_TIMEOUT_MAX 31
  
        if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
                                   "local ACK timeout shall be 0 to 20.\n");
                        return false;
                }
-               *timeout += QP_ACK_TIMEOUT_OFFSET;
+               *timeout += HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
        } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) {
                if (*timeout > QP_ACK_TIMEOUT_MAX) {
                        ibdev_warn(&hr_dev->ib_dev,
        return ret;
  }
  
+ static u8 get_qp_timeout_attr(struct hns_roce_dev *hr_dev,
+                             struct hns_roce_v2_qp_context *context)
+ {
+       u8 timeout;
+       timeout = (u8)hr_reg_read(context, QPC_AT);
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+               timeout -= HNS_ROCE_V2_QP_ACK_TIMEOUT_OFS_HIP08;
+       return timeout;
+ }
  static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
                                int qp_attr_mask,
                                struct ib_qp_init_attr *qp_init_attr)
        qp_attr->max_dest_rd_atomic = 1 << hr_reg_read(&context, QPC_RR_MAX);
  
        qp_attr->min_rnr_timer = (u8)hr_reg_read(&context, QPC_MIN_RNR_TIME);
-       qp_attr->timeout = (u8)hr_reg_read(&context, QPC_AT);
+       qp_attr->timeout = get_qp_timeout_attr(hr_dev, &context);
        qp_attr->retry_cnt = hr_reg_read(&context, QPC_RETRY_NUM_INIT);
        qp_attr->rnr_retry = hr_reg_read(&context, QPC_RNR_NUM_INIT);
  
index 6242ab6af77fa9aaadec27418d49ee552a46bb98,eaa12c124598285beaa891761d8a2ec6aaa17ada..9c4fe4fa90018142f67fd0bd7bd56a9c57b8cdce
@@@ -522,11 -522,6 +522,6 @@@ static int irdma_destroy_qp(struct ib_q
        if (!iwqp->user_mode)
                cancel_delayed_work_sync(&iwqp->dwork_flush);
  
-       irdma_qp_rem_ref(&iwqp->ibqp);
-       wait_for_completion(&iwqp->free_qp);
-       irdma_free_lsmm_rsrc(iwqp);
-       irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
        if (!iwqp->user_mode) {
                if (iwqp->iwscq) {
                        irdma_clean_cqes(iwqp, iwqp->iwscq);
                                irdma_clean_cqes(iwqp, iwqp->iwrcq);
                }
        }
+       irdma_qp_rem_ref(&iwqp->ibqp);
+       wait_for_completion(&iwqp->free_qp);
+       irdma_free_lsmm_rsrc(iwqp);
+       irdma_cqp_qp_destroy_cmd(&iwdev->rf->sc_dev, &iwqp->sc_qp);
        irdma_remove_push_mmap_entries(iwqp);
        irdma_free_qp_rsrc(iwqp);
  
@@@ -3291,6 -3292,7 +3292,7 @@@ static int irdma_post_send(struct ib_q
                        break;
                case IB_WR_LOCAL_INV:
                        info.op_type = IRDMA_OP_TYPE_INV_STAG;
+                       info.local_fence = info.read_fence;
                        info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
                        err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
                        break;
@@@ -4450,16 -4452,8 +4452,16 @@@ static const struct ib_device_ops irdma
  };
  
  static const struct ib_device_ops irdma_iw_dev_ops = {
 -      .modify_qp = irdma_modify_qp,
        .get_port_immutable = irdma_iw_port_immutable,
 +      .iw_accept = irdma_accept,
 +      .iw_add_ref = irdma_qp_add_ref,
 +      .iw_connect = irdma_connect,
 +      .iw_create_listen = irdma_create_listen,
 +      .iw_destroy_listen = irdma_destroy_listen,
 +      .iw_get_qp = irdma_get_qp,
 +      .iw_reject = irdma_reject,
 +      .iw_rem_ref = irdma_qp_rem_ref,
 +      .modify_qp = irdma_modify_qp,
        .query_gid = irdma_query_gid,
  };
  
@@@ -4523,35 -4517,50 +4525,35 @@@ static void irdma_init_roce_device(stru
   * irdma_init_iw_device - initialization of iwarp rdma device
   * @iwdev: irdma device
   */
 -static int irdma_init_iw_device(struct irdma_device *iwdev)
 +static void irdma_init_iw_device(struct irdma_device *iwdev)
  {
        struct net_device *netdev = iwdev->netdev;
  
        iwdev->ibdev.node_type = RDMA_NODE_RNIC;
        addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
                            netdev->dev_addr);
 -      iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
 -      iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
 -      iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
 -      iwdev->ibdev.ops.iw_connect = irdma_connect;
 -      iwdev->ibdev.ops.iw_accept = irdma_accept;
 -      iwdev->ibdev.ops.iw_reject = irdma_reject;
 -      iwdev->ibdev.ops.iw_create_listen = irdma_create_listen;
 -      iwdev->ibdev.ops.iw_destroy_listen = irdma_destroy_listen;
        memcpy(iwdev->ibdev.iw_ifname, netdev->name,
               sizeof(iwdev->ibdev.iw_ifname));
        ib_set_device_ops(&iwdev->ibdev, &irdma_iw_dev_ops);
 -
 -      return 0;
  }
  
  /**
   * irdma_init_rdma_device - initialization of rdma device
   * @iwdev: irdma device
   */
 -static int irdma_init_rdma_device(struct irdma_device *iwdev)
 +static void irdma_init_rdma_device(struct irdma_device *iwdev)
  {
        struct pci_dev *pcidev = iwdev->rf->pcidev;
 -      int ret;
  
 -      if (iwdev->roce_mode) {
 +      if (iwdev->roce_mode)
                irdma_init_roce_device(iwdev);
 -      } else {
 -              ret = irdma_init_iw_device(iwdev);
 -              if (ret)
 -                      return ret;
 -      }
 +      else
 +              irdma_init_iw_device(iwdev);
 +
        iwdev->ibdev.phys_port_cnt = 1;
        iwdev->ibdev.num_comp_vectors = iwdev->rf->ceqs_count;
        iwdev->ibdev.dev.parent = &pcidev->dev;
        ib_set_device_ops(&iwdev->ibdev, &irdma_dev_ops);
 -
 -      return 0;
  }
  
  /**
@@@ -4589,7 -4598,9 +4591,7 @@@ int irdma_ib_register_device(struct ird
  {
        int ret;
  
 -      ret = irdma_init_rdma_device(iwdev);
 -      if (ret)
 -              return ret;
 +      irdma_init_rdma_device(iwdev);
  
        ret = ib_device_set_netdev(&iwdev->ibdev, iwdev->netdev, 1);
        if (ret)
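
Beyond the destroy ordering and local-fence fixes, the irdma hunk above folds the per-device iWARP CM callback assignments into the static irdma_iw_dev_ops table handed to ib_set_device_ops(), which is what lets irdma_init_iw_device() and irdma_init_rdma_device() stop returning a value. The toy program below shows the general shape of that pattern; set_ops() is a simplified stand-in for ib_set_device_ops(), not its real implementation, and all names are invented.

#include <stdio.h>

struct dev_ops {
        void (*connect)(void);
        void (*accept)(void);
};

struct device {
        struct dev_ops ops;
};

static void iw_connect(void) { puts("connect"); }
static void iw_accept(void)  { puts("accept"); }

/* callbacks collected in one read-only table instead of ad hoc assignments */
static const struct dev_ops iw_dev_ops = {
        .connect = iw_connect,
        .accept  = iw_accept,
};

/* copy only the callbacks the table actually provides */
static void set_ops(struct device *dev, const struct dev_ops *ops)
{
        if (ops->connect)
                dev->ops.connect = ops->connect;
        if (ops->accept)
                dev->ops.accept = ops->accept;
}

int main(void)
{
        struct device dev = { 0 };

        set_ops(&dev, &iw_dev_ops);
        dev.ops.connect();
        dev.ops.accept();
        return 0;
}
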
index 4bceef878c437c96e8169ba61c4d3b85288cce37,2dfa6f49a6f4816c7c0bad61d5928a6e38187e5f..9c33d960af3c5cd3887be7546ad496f2666c1394
@@@ -25,7 -25,6 +25,7 @@@
  #include <rdma/mlx5_user_ioctl_verbs.h>
  
  #include "srq.h"
 +#include "qp.h"
  
  #define mlx5_ib_dbg(_dev, format, arg...)                                      \
        dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
@@@ -238,8 -237,19 +238,19 @@@ enum 
  #define MLX5_IB_NUM_SNIFFER_FTS               2
  #define MLX5_IB_NUM_EGRESS_FTS                1
  #define MLX5_IB_NUM_FDB_FTS           MLX5_BY_PASS_NUM_REGULAR_PRIOS
+ struct mlx5_ib_anchor {
+       struct mlx5_flow_table *ft;
+       struct mlx5_flow_group *fg_goto_table;
+       struct mlx5_flow_group *fg_drop;
+       struct mlx5_flow_handle *rule_goto_table;
+       struct mlx5_flow_handle *rule_drop;
+       unsigned int rule_goto_table_ref;
+ };
  struct mlx5_ib_flow_prio {
        struct mlx5_flow_table          *flow_table;
+       struct mlx5_ib_anchor           anchor;
        unsigned int                    refcount;
  };
  
@@@ -1588,6 -1598,9 +1599,9 @@@ static inline bool mlx5_ib_lag_should_a
            MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
                return 0;
  
+       if (mlx5_lag_is_lacp_owner(dev->mdev) && !dev->lag_active)
+               return 0;
        return dev->lag_active ||
                (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
                 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
index 0c0ae214c3a9b7ca30d346e1b29ab6d1ceb1ab5d,f46c5a5fd0aea4d4c0530d054949a8867013053a..5111735aafaed8286a72a4972750a11e2c4cedbc
@@@ -115,15 -115,16 +115,16 @@@ static enum ib_wc_opcode wr_to_wc_opcod
  void retransmit_timer(struct timer_list *t)
  {
        struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
+       unsigned long flags;
  
        rxe_dbg_qp(qp, "retransmit timer fired\n");
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp->valid) {
                qp->comp.timeout = 1;
                rxe_sched_task(&qp->comp.task);
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  }
  
  void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
@@@ -481,11 -482,13 +482,13 @@@ static void do_complete(struct rxe_qp *
  
  static void comp_check_sq_drain_done(struct rxe_qp *qp)
  {
-       spin_lock_bh(&qp->state_lock);
+       unsigned long flags;
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(qp_state(qp) == IB_QPS_SQD)) {
                if (qp->attr.sq_draining && qp->comp.psn == qp->req.psn) {
                        qp->attr.sq_draining = 0;
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
  
                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;
                        return;
                }
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  }
  
  static inline enum comp_state complete_ack(struct rxe_qp *qp,
@@@ -625,13 -628,15 +628,15 @@@ static void free_pkt(struct rxe_pkt_inf
   */
  static void reset_retry_timer(struct rxe_qp *qp)
  {
+       unsigned long flags;
        if (qp_type(qp) == IB_QPT_RC && qp->qp_timeout_jiffies) {
-               spin_lock_bh(&qp->state_lock);
+               spin_lock_irqsave(&qp->state_lock, flags);
                if (qp_state(qp) >= IB_QPS_RTS &&
                    psn_compare(qp->req.psn, qp->comp.psn) > 0)
                        mod_timer(&qp->retrans_timer,
                                  jiffies + qp->qp_timeout_jiffies);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
        }
  }
  
@@@ -643,18 -648,19 +648,19 @@@ int rxe_completer(struct rxe_qp *qp
        struct rxe_pkt_info *pkt = NULL;
        enum comp_state state;
        int ret;
+       unsigned long flags;
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
                          qp_state(qp) == IB_QPS_RESET) {
                bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
  
                drain_resp_pkts(qp);
                flush_send_queue(qp, notify);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        if (qp->comp.timeout) {
                qp->comp.timeout_retry = 1;
        }
  
        /* A non-zero return value will cause rxe_do_task to
 -       * exit its loop and end the tasklet. A zero return
 +       * exit its loop and end the work item. A zero return
         * will continue looping and return to rxe_completer
         */
  done:
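
The change that dominates the rxe hunks in this merge (rxe_comp.c above, plus rxe_qp.c, rxe_req.c, rxe_resp.c and rxe_verbs.c below) is mechanical: every spin_lock_bh()/spin_unlock_bh() on qp->state_lock becomes spin_lock_irqsave()/spin_unlock_irqrestore() with the saved flags kept on the caller's stack, and the task comments now speak of work items rather than tasklets. The sketch below only illustrates the before/after locking shape; struct foo_qp is a hypothetical stand-in for struct rxe_qp, and the snippet is kernel code, meant to be read rather than built as a userspace program.

#include <linux/spinlock.h>

struct foo_qp {                         /* hypothetical, not struct rxe_qp */
        spinlock_t state_lock;
        int timeout;
};

/* before: only safe against softirq context */
static void foo_mark_timeout_bh(struct foo_qp *qp)
{
        spin_lock_bh(&qp->state_lock);
        qp->timeout = 1;
        spin_unlock_bh(&qp->state_lock);
}

/* after: safe from any context, restores the caller's IRQ state */
static void foo_mark_timeout(struct foo_qp *qp)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->state_lock, flags);
        qp->timeout = 1;
        spin_unlock_irqrestore(&qp->state_lock, flags);
}
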
index 31a25aaa44a07806f8464e9de8d502f2762c6543,6ca2a05b6a2ab262594b7a3fbbc799f86fba4eff..d5486cbb3f100404808e1e1df5b68452239292cc
@@@ -113,14 -113,15 +113,14 @@@ int rxe_cq_post(struct rxe_cq *cq, stru
  
        queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
  
-       spin_unlock_irqrestore(&cq->cq_lock, flags);
 -      if ((cq->notify == IB_CQ_NEXT_COMP) ||
 -          (cq->notify == IB_CQ_SOLICITED && solicited)) {
 +      if ((cq->notify & IB_CQ_NEXT_COMP) ||
 +          (cq->notify & IB_CQ_SOLICITED && solicited)) {
                cq->notify = 0;
 -
                cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
        }
  
+       spin_unlock_irqrestore(&cq->cq_lock, flags);
        return 0;
  }
  
index 95d4a6760c3354f231417ce91d443dcde4e552cc,a0f206431cf8eb82d21573888290f2826367b803..a569b111a9d2a88c79b5a99350e5d261942b45a5
@@@ -176,6 -176,9 +176,9 @@@ static void rxe_qp_init_misc(struct rxe
        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);
  
+       skb_queue_head_init(&qp->req_pkts);
+       skb_queue_head_init(&qp->resp_pkts);
        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
  }
@@@ -234,8 -237,6 +237,6 @@@ static int rxe_qp_init_req(struct rxe_d
        qp->req.opcode          = -1;
        qp->comp.opcode         = -1;
  
-       skb_queue_head_init(&qp->req_pkts);
        rxe_init_task(&qp->req.task, qp, rxe_requester);
        rxe_init_task(&qp->comp.task, qp, rxe_completer);
  
@@@ -279,8 -280,6 +280,6 @@@ static int rxe_qp_init_resp(struct rxe_
                }
        }
  
-       skb_queue_head_init(&qp->resp_pkts);
        rxe_init_task(&qp->resp.task, qp, rxe_responder);
  
        qp->resp.opcode         = OPCODE_NONE;
@@@ -300,6 -299,7 +299,7 @@@ int rxe_qp_from_init(struct rxe_dev *rx
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
+       unsigned long flags;
  
        rxe_get(pd);
        rxe_get(rcq);
        if (err)
                goto err2;
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        return 0;
  
@@@ -392,13 -392,6 +392,13 @@@ int rxe_qp_chk_attr(struct rxe_dev *rxe
        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;
  
 +      if (mask & IB_QP_ACCESS_FLAGS) {
 +              if (!(qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_UC))
 +                      goto err1;
 +              if (attr->qp_access_flags & ~RXE_ACCESS_SUPPORTED_QP)
 +                      goto err1;
 +      }
 +
        if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
                goto err1;
  
@@@ -499,24 -492,28 +499,28 @@@ static void rxe_qp_reset(struct rxe_qp 
  /* move the qp to the error state */
  void rxe_qp_error(struct rxe_qp *qp)
  {
-       spin_lock_bh(&qp->state_lock);
+       unsigned long flags;
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.qp_state = IB_QPS_ERR;
  
        /* drain work and packet queues */
        rxe_sched_task(&qp->resp.task);
        rxe_sched_task(&qp->comp.task);
        rxe_sched_task(&qp->req.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  }
  
  static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
                       int mask)
  {
-       spin_lock_bh(&qp->state_lock);
+       unsigned long flags;
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.sq_draining = 1;
        rxe_sched_task(&qp->comp.task);
        rxe_sched_task(&qp->req.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  }
  
  /* caller should hold qp->state_lock */
@@@ -562,14 -559,16 +566,16 @@@ int rxe_qp_from_attr(struct rxe_qp *qp
                qp->attr.cur_qp_state = attr->qp_state;
  
        if (mask & IB_QP_STATE) {
-               spin_lock_bh(&qp->state_lock);
+               unsigned long flags;
+               spin_lock_irqsave(&qp->state_lock, flags);
                err = __qp_chk_state(qp, attr, mask);
                if (!err) {
                        qp->attr.qp_state = attr->qp_state;
                        rxe_dbg_qp(qp, "state -> %s\n",
                                        qps2str[attr->qp_state]);
                }
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
  
                if (err)
                        return err;
  /* called by the query qp verb */
  int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
  {
+       unsigned long flags;
        *attr = qp->attr;
  
        attr->rq_psn                            = qp->resp.psn;
        /* Applications that get this state typically spin on it.
         * Yield the processor
         */
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp->attr.sq_draining) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                cond_resched();
+       } else {
+               spin_unlock_irqrestore(&qp->state_lock, flags);
        }
-       spin_unlock_bh(&qp->state_lock);
  
        return 0;
  }
@@@ -743,10 -745,11 +752,11 @@@ int rxe_qp_chk_destroy(struct rxe_qp *q
  static void rxe_qp_do_cleanup(struct work_struct *work)
  {
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
+       unsigned long flags;
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        qp->valid = 0;
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
        qp->qp_timeout_jiffies = 0;
  
        if (qp_type(qp) == IB_QPT_RC) {
index 400840c913a9520a8da6bce4a8c82257ff26a528,5fe7cbae30313c0ce5f2ab61e00e33e6778b19ad..2171f19494bca131971d3cfaee6dba78281d1276
@@@ -99,17 -99,18 +99,18 @@@ static void req_retry(struct rxe_qp *qp
  void rnr_nak_timer(struct timer_list *t)
  {
        struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
+       unsigned long flags;
  
        rxe_dbg_qp(qp, "nak timer fired\n");
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp->valid) {
                /* request a send queue retry */
                qp->req.need_retry = 1;
                qp->req.wait_for_rnr_timer = 0;
                rxe_sched_task(&qp->req.task);
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  }
  
  static void req_check_sq_drain_done(struct rxe_qp *qp)
        unsigned int index;
        unsigned int cons;
        struct rxe_send_wqe *wqe;
+       unsigned long flags;
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp_state(qp) == IB_QPS_SQD) {
                q = qp->sq.queue;
                index = qp->req.wqe_index;
                                break;
  
                        qp->attr.sq_draining = 0;
-                       spin_unlock_bh(&qp->state_lock);
+                       spin_unlock_irqrestore(&qp->state_lock, flags);
  
                        if (qp->ibqp.event_handler) {
                                struct ib_event ev;
                        return;
                } while (0);
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  }
  
  static struct rxe_send_wqe *__req_next_wqe(struct rxe_qp *qp)
  static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
  {
        struct rxe_send_wqe *wqe;
+       unsigned long flags;
  
        req_check_sq_drain_done(qp);
  
        if (wqe == NULL)
                return NULL;
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely((qp_state(qp) == IB_QPS_SQD) &&
                     (wqe->state != wqe_state_processing))) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                return NULL;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
        return wqe;
@@@ -676,16 -679,17 +679,17 @@@ int rxe_requester(struct rxe_qp *qp
        struct rxe_queue *q = qp->sq.queue;
        struct rxe_ah *ah;
        struct rxe_av *av;
+       unsigned long flags;
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(!qp->valid)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
  
        if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
                wqe = __req_next_wqe(qp);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                if (wqe)
                        goto err;
                else
                qp->req.wait_psn = 0;
                qp->req.need_retry = 0;
                qp->req.wait_for_rnr_timer = 0;
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        /* we come here if the retransmit timer has fired
         * or if the rnr timer has fired. If the retransmit
        update_state(qp, &pkt);
  
        /* A non-zero return value will cause rxe_do_task to
 -       * exit its loop and end the tasklet. A zero return
 +       * exit its loop and end the work item. A zero return
         * will continue looping and return to rxe_requester
         */
  done:
index 8a3c9c2c5a2d48d554d5a4360343158c86b18af5,ee68306555b990ca169175bae2c31003737d62aa..64c64f5f36a810addfa84284268fb8699a6be772
@@@ -387,10 -387,7 +387,10 @@@ static enum resp_states rxe_resp_check_
                }
        }
  
 -      return RESPST_CHK_RKEY;
 +      if (pkt->mask & RXE_RDMA_OP_MASK)
 +              return RESPST_CHK_RKEY;
 +      else
 +              return RESPST_EXECUTE;
  }
  
  /* if the reth length field is zero we can assume nothing
@@@ -437,10 -434,6 +437,10 @@@ static enum resp_states check_rkey(stru
        enum resp_states state;
        int access = 0;
  
 +      /* parse RETH or ATMETH header for first/only packets
 +       * for va, length, rkey, etc. or use current value for
 +       * middle/last packets.
 +       */
        if (pkt->mask & (RXE_READ_OR_WRITE_MASK | RXE_ATOMIC_WRITE_MASK)) {
                if (pkt->mask & RXE_RETH_MASK)
                        qp_resp_from_reth(qp, pkt);
                qp_resp_from_atmeth(qp, pkt);
                access = IB_ACCESS_REMOTE_ATOMIC;
        } else {
 -              return RESPST_EXECUTE;
 +              /* shouldn't happen */
 +              WARN_ON(1);
        }
  
        /* A zero-byte read or write op is not required to
                if (mw->access & IB_ZERO_BASED)
                        qp->resp.offset = mw->addr;
  
-               rxe_put(mw);
                rxe_get(mr);
+               rxe_put(mw);
+               mw = NULL;
        } else {
                mr = lookup_mr(qp->pd, access, rkey, RXE_LOOKUP_REMOTE);
                if (!mr) {
@@@ -1055,6 -1048,7 +1056,7 @@@ static enum resp_states do_complete(str
        struct ib_uverbs_wc *uwc = &cqe.uibwc;
        struct rxe_recv_wqe *wqe = qp->resp.wqe;
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+       unsigned long flags;
  
        if (!wqe)
                goto finish;
                return RESPST_ERR_CQ_OVERFLOW;
  
  finish:
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (unlikely(qp_state(qp) == IB_QPS_ERR)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                return RESPST_CHK_RESOURCE;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        if (unlikely(!pkt))
                return RESPST_DONE;
@@@ -1455,17 -1449,8 +1457,17 @@@ static void flush_recv_queue(struct rxe
        struct rxe_recv_wqe *wqe;
        int err;
  
 -      if (qp->srq)
 +      if (qp->srq) {
 +              if (notify && qp->ibqp.event_handler) {
 +                      struct ib_event ev;
 +
 +                      ev.device = qp->ibqp.device;
 +                      ev.element.qp = &qp->ibqp;
 +                      ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
 +                      qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
 +              }
                return;
 +      }
  
        while ((wqe = queue_head(q, q->type))) {
                if (notify) {
@@@ -1485,18 -1470,19 +1487,19 @@@ int rxe_responder(struct rxe_qp *qp
        enum resp_states state;
        struct rxe_pkt_info *pkt = NULL;
        int ret;
+       unsigned long flags;
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (!qp->valid || qp_state(qp) == IB_QPS_ERR ||
                          qp_state(qp) == IB_QPS_RESET) {
                bool notify = qp->valid && (qp_state(qp) == IB_QPS_ERR);
  
                drain_req_pkts(qp);
                flush_recv_queue(qp, notify);
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                goto exit;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;
  
        }
  
        /* A non-zero return value will cause rxe_do_task to
 -       * exit its loop and end the tasklet. A zero return
 +       * exit its loop and end the work item. A zero return
         * will continue looping and return to rxe_responder
         */
  done:
index 515f9ff72d18d636a4159109dcb91acd953cb751,4d8f6b8051ff7db5509b2e9961af54c1479373cb..f4321a1720000039fa88ded10d76b99d28d8a1f4
@@@ -904,10 -904,10 +904,10 @@@ static int rxe_post_send_kernel(struct 
        if (!err)
                rxe_sched_task(&qp->req.task);
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp_state(qp) == IB_QPS_ERR)
                rxe_sched_task(&qp->comp.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        return err;
  }
@@@ -917,22 -917,23 +917,23 @@@ static int rxe_post_send(struct ib_qp *
  {
        struct rxe_qp *qp = to_rqp(ibqp);
        int err;
+       unsigned long flags;
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        /* caller has already called destroy_qp */
        if (WARN_ON_ONCE(!qp->valid)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                rxe_err_qp(qp, "qp has been destroyed");
                return -EINVAL;
        }
  
        if (unlikely(qp_state(qp) < IB_QPS_RTS)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                *bad_wr = wr;
                rxe_err_qp(qp, "qp not ready to send");
                return -EINVAL;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        if (qp->is_user) {
                /* Utilize process context to do protocol processing */
@@@ -1008,22 -1009,22 +1009,22 @@@ static int rxe_post_recv(struct ib_qp *
        struct rxe_rq *rq = &qp->rq;
        unsigned long flags;
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        /* caller has already called destroy_qp */
        if (WARN_ON_ONCE(!qp->valid)) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                rxe_err_qp(qp, "qp has been destroyed");
                return -EINVAL;
        }
  
        /* see C10-97.2.1 */
        if (unlikely((qp_state(qp) < IB_QPS_INIT))) {
-               spin_unlock_bh(&qp->state_lock);
+               spin_unlock_irqrestore(&qp->state_lock, flags);
                *bad_wr = wr;
                rxe_dbg_qp(qp, "qp not ready to post recv");
                return -EINVAL;
        }
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        if (unlikely(qp->srq)) {
                *bad_wr = wr;
  
        spin_unlock_irqrestore(&rq->producer_lock, flags);
  
-       spin_lock_bh(&qp->state_lock);
+       spin_lock_irqsave(&qp->state_lock, flags);
        if (qp_state(qp) == IB_QPS_ERR)
                rxe_sched_task(&qp->resp.task);
-       spin_unlock_bh(&qp->state_lock);
+       spin_unlock_irqrestore(&qp->state_lock, flags);
  
        return err;
  }
@@@ -1181,7 -1182,9 +1182,7 @@@ static int rxe_req_notify_cq(struct ib_
        unsigned long irq_flags;
  
        spin_lock_irqsave(&cq->cq_lock, irq_flags);
 -      if (cq->notify != IB_CQ_NEXT_COMP)
 -              cq->notify = flags & IB_CQ_SOLICITED_MASK;
 -
 +      cq->notify |= flags & IB_CQ_SOLICITED_MASK;
        empty = queue_empty(cq->queue, QUEUE_TYPE_TO_ULP);
  
        if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
@@@ -1258,12 -1261,6 +1259,12 @@@ static struct ib_mr *rxe_reg_user_mr(st
        struct rxe_mr *mr;
        int err, cleanup_err;
  
 +      if (access & ~RXE_ACCESS_SUPPORTED_MR) {
 +              rxe_err_pd(pd, "access = %#x not supported (%#x)", access,
 +                              RXE_ACCESS_SUPPORTED_MR);
 +              return ERR_PTR(-EOPNOTSUPP);
 +      }
 +
        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);
@@@ -1297,40 -1294,6 +1298,40 @@@ err_free
        return ERR_PTR(err);
  }
  
 +static struct ib_mr *rxe_rereg_user_mr(struct ib_mr *ibmr, int flags,
 +                                     u64 start, u64 length, u64 iova,
 +                                     int access, struct ib_pd *ibpd,
 +                                     struct ib_udata *udata)
 +{
 +      struct rxe_mr *mr = to_rmr(ibmr);
 +      struct rxe_pd *old_pd = to_rpd(ibmr->pd);
 +      struct rxe_pd *pd = to_rpd(ibpd);
 +
 +      /* for now only support the two easy cases:
 +       * rereg_pd and rereg_access
 +       */
 +      if (flags & ~RXE_MR_REREG_SUPPORTED) {
 +              rxe_err_mr(mr, "flags = %#x not supported", flags);
 +              return ERR_PTR(-EOPNOTSUPP);
 +      }
 +
 +      if (flags & IB_MR_REREG_PD) {
 +              rxe_put(old_pd);
 +              rxe_get(pd);
 +              mr->ibmr.pd = ibpd;
 +      }
 +
 +      if (flags & IB_MR_REREG_ACCESS) {
 +              if (access & ~RXE_ACCESS_SUPPORTED_MR) {
 +                      rxe_err_mr(mr, "access = %#x not supported", access);
 +                      return ERR_PTR(-EOPNOTSUPP);
 +              }
 +              mr->access = access;
 +      }
 +
 +      return NULL;
 +}
 +
  static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
                                  u32 max_num_sg)
  {
@@@ -1483,7 -1446,6 +1484,7 @@@ static const struct ib_device_ops rxe_d
        .query_srq = rxe_query_srq,
        .reg_user_mr = rxe_reg_user_mr,
        .req_notify_cq = rxe_req_notify_cq,
 +      .rereg_user_mr = rxe_rereg_user_mr,
        .resize_cq = rxe_resize_cq,
  
        INIT_RDMA_OBJ_SIZE(ib_ah, rxe_ah, ibah),
index 3738aaefbe1316ef7e11af1ea41813fbc691cc2d,cfb50bfe53c3c9a1eeecd5398d2883ecdeb5a2cf..b32941dd67cb90d5b1260369d227316b418a1ec3
@@@ -1710,6 -1710,7 +1710,6 @@@ static int create_con_cq_qp(struct rtrs
                        return -ENOMEM;
                con->queue_num = cq_num;
        }
 -      cq_num = max_send_wr + max_recv_wr;
        cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
        if (con->c.cid >= clt_path->s.irq_con_num)
                err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
@@@ -2039,6 -2040,7 +2039,7 @@@ static int rtrs_clt_rdma_cm_handler(str
        return 0;
  }
  
+ /* The caller should do the cleanup in case of error */
  static int create_cm(struct rtrs_clt_con *con)
  {
        struct rtrs_path *s = con->c.path;
        err = rdma_set_reuseaddr(cm_id, 1);
        if (err != 0) {
                rtrs_err(s, "Set address reuse failed, err: %d\n", err);
-               goto destroy_cm;
+               return err;
        }
        err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr,
                                (struct sockaddr *)&clt_path->s.dst_addr,
                                RTRS_CONNECT_TIMEOUT_MS);
        if (err) {
                rtrs_err(s, "Failed to resolve address, err: %d\n", err);
-               goto destroy_cm;
+               return err;
        }
        /*
         * Combine connection status and session events. This is needed
                if (err == 0)
                        err = -ETIMEDOUT;
                /* Timedout or interrupted */
-               goto errr;
-       }
-       if (con->cm_err < 0) {
-               err = con->cm_err;
-               goto errr;
+               return err;
        }
-       if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) {
+       if (con->cm_err < 0)
+               return con->cm_err;
+       if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING)
                /* Device removal */
-               err = -ECONNABORTED;
-               goto errr;
-       }
+               return -ECONNABORTED;
  
        return 0;
- errr:
-       stop_cm(con);
-       mutex_lock(&con->con_mutex);
-       destroy_con_cq_qp(con);
-       mutex_unlock(&con->con_mutex);
- destroy_cm:
-       destroy_cm(con);
-       return err;
  }
  
  static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path)
@@@ -2333,7 -2321,7 +2320,7 @@@ static void rtrs_clt_close_work(struct 
  static int init_conns(struct rtrs_clt_path *clt_path)
  {
        unsigned int cid;
-       int err;
+       int err, i;
  
        /*
         * On every new session connections increase reconnect counter
                        goto destroy;
  
                err = create_cm(to_clt_con(clt_path->s.con[cid]));
-               if (err) {
-                       destroy_con(to_clt_con(clt_path->s.con[cid]));
+               if (err)
                        goto destroy;
-               }
        }
        err = alloc_path_reqs(clt_path);
        if (err)
        return 0;
  
  destroy:
-       while (cid--) {
-               struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]);
+       /* Make sure we do the cleanup in the order they are created */
+       for (i = 0; i <= cid; i++) {
+               struct rtrs_clt_con *con;
  
-               stop_cm(con);
+               if (!clt_path->s.con[i])
+                       break;
  
-               mutex_lock(&con->con_mutex);
-               destroy_con_cq_qp(con);
-               mutex_unlock(&con->con_mutex);
-               destroy_cm(con);
+               con = to_clt_con(clt_path->s.con[i]);
+               if (con->c.cm_id) {
+                       stop_cm(con);
+                       mutex_lock(&con->con_mutex);
+                       destroy_con_cq_qp(con);
+                       mutex_unlock(&con->con_mutex);
+                       destroy_cm(con);
+               }
                destroy_con(con);
        }
        /*
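
The rtrs-clt.c hunks above shift error-handling ownership: create_cm() now simply returns on failure ("The caller should do the cleanup in case of error"), and init_conns() unwinds in creation order, tearing down the CM id and CQ/QP only for connections that actually got that far. The standalone sketch below mirrors that shape with invented names and a deliberately injected failure; it illustrates the pattern, not the rtrs code itself.

#include <stdio.h>
#include <stdlib.h>

#define NR_CONS 4

struct con {
        int has_cm;                     /* stands in for con->c.cm_id */
};

static struct con *cons[NR_CONS];

static int create_cm(struct con *c, int fail)
{
        if (fail)
                return -1;              /* no cleanup here: caller handles it */
        c->has_cm = 1;
        return 0;
}

static int init_conns(int fail_at)
{
        int cid, i, err = 0;

        for (cid = 0; cid < NR_CONS; cid++) {
                cons[cid] = calloc(1, sizeof(*cons[cid]));
                if (!cons[cid]) {
                        err = -1;
                        goto destroy;
                }
                err = create_cm(cons[cid], cid == fail_at);
                if (err)
                        goto destroy;
        }
        return 0;

destroy:
        /* clean up in the order the connections were created */
        for (i = 0; i <= cid; i++) {
                if (!cons[i])
                        break;
                if (cons[i]->has_cm)
                        printf("stop/destroy cm for con %d\n", i);
                printf("destroy con %d\n", i);
                free(cons[i]);
                cons[i] = NULL;
        }
        return err;
}

int main(void)
{
        printf("init_conns -> %d\n", init_conns(2));
        return 0;
}
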
index b3fcb767b9aba823c325e8b9132dd5a0d37ebfac,d907727c7b7a5324b66e319683d4d611638cd09c..7f4e861e398e66e2af09643680bb4a1e1aee99d6
@@@ -972,7 -972,7 +972,7 @@@ static int mana_cfg_vport_steering(stru
                                   bool update_tab)
  {
        u16 num_entries = MANA_INDIRECT_TABLE_SIZE;
 -      struct mana_cfg_rx_steer_req *req = NULL;
 +      struct mana_cfg_rx_steer_req_v2 *req;
        struct mana_cfg_rx_steer_resp resp = {};
        struct net_device *ndev = apc->ndev;
        mana_handle_t *req_indir_tab;
        mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
                             sizeof(resp));
  
 +      req->hdr.req.msg_version = GDMA_MESSAGE_V2;
 +
        req->vport = apc->port_handle;
        req->num_indir_entries = num_entries;
        req->indir_tab_offset = sizeof(*req);
        req->update_hashkey = update_key;
        req->update_indir_tab = update_tab;
        req->default_rxobj = apc->default_rxobj;
 +      req->cqe_coalescing_enable = 0;
  
        if (update_key)
                memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
@@@ -1282,8 -1279,6 +1282,6 @@@ static void mana_poll_tx_cq(struct mana
        if (comp_read < 1)
                return;
  
-       apc->eth_stats.tx_cqes = comp_read;
        for (i = 0; i < comp_read; i++) {
                struct mana_tx_comp_oob *cqe_oob;
  
                WARN_ON_ONCE(1);
  
        cq->work_done = pkt_transmitted;
-       apc->eth_stats.tx_cqes -= pkt_transmitted;
  }
  
  static void mana_post_pkt_rxq(struct mana_rxq *rxq)
@@@ -1629,15 -1622,11 +1625,11 @@@ static void mana_poll_rx_cq(struct mana
  {
        struct gdma_comp *comp = cq->gdma_comp_buf;
        struct mana_rxq *rxq = cq->rxq;
-       struct mana_port_context *apc;
        int comp_read, i;
  
-       apc = netdev_priv(rxq->ndev);
        comp_read = mana_gd_poll_cq(cq->gdma_cq, comp, CQE_POLLING_BUFFER);
        WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
  
-       apc->eth_stats.rx_cqes = comp_read;
        rxq->xdp_flush = false;
  
        for (i = 0; i < comp_read; i++) {
                        return;
  
                mana_process_rx_cqe(rxq, cq, &comp[i]);
-               apc->eth_stats.rx_cqes--;
        }
  
        if (rxq->xdp_flush)
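
The mana_en.c hunk above upgrades the Rx-steering request to a v2 message: the header's msg_version is bumped to GDMA_MESSAGE_V2 and a cqe_coalescing_enable byte is filled in, matching the mana_cfg_rx_steer_req_v2 layout shown further down in mana.h, where the new byte is followed by seven reserved bytes. The toy structs below only illustrate why padding like that gets appended; the field set is simplified, and reading the seven bytes as "keep the size a multiple of 8 so the indirection table written after the request stays aligned" is an inference, not something stated in the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct toy_req_v1 {
        uint64_t vport;
        uint8_t  hashkey[40];
} __attribute__((packed));

struct toy_req_v2 {
        uint64_t vport;
        uint8_t  hashkey[40];
        uint8_t  cqe_coalescing_enable; /* new in v2 */
        uint8_t  reserved2[7];          /* pad back to an 8-byte multiple */
} __attribute__((packed));

int main(void)
{
        static_assert(sizeof(struct toy_req_v2) % 8 == 0, "v2 stays 8-byte sized");
        printf("v1 = %zu bytes, v2 = %zu bytes\n",
               sizeof(struct toy_req_v1), sizeof(struct toy_req_v2));
        return 0;
}
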
index e67c603d507bb3e5f8947f62a737e76514663dee,4b9626cd83e456b01f539e5e3a5c40d3650b2374..e0156b54d0b3291dbf2bc0ac46a13e834c272099
@@@ -382,6 -382,7 +382,6 @@@ enum mlx5_res_type 
        MLX5_RES_SRQ    = 3,
        MLX5_RES_XSRQ   = 4,
        MLX5_RES_XRQ    = 5,
 -      MLX5_RES_DCT    = MLX5_EVENT_QUEUE_TYPE_DCT,
  };
  
  struct mlx5_core_rsc_common {
@@@ -442,6 -443,15 +442,6 @@@ struct mlx5_core_health 
        struct delayed_work             update_fw_log_ts_work;
  };
  
 -struct mlx5_qp_table {
 -      struct notifier_block   nb;
 -
 -      /* protect radix tree
 -       */
 -      spinlock_t              lock;
 -      struct radix_tree_root  tree;
 -};
 -
  enum {
        MLX5_PF_NOTIFY_DISABLE_VF,
        MLX5_PF_NOTIFY_ENABLE_VF,
@@@ -1083,6 -1093,7 +1083,7 @@@ void mlx5_cmdif_debugfs_cleanup(struct 
  int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
                         int npsvs, u32 *sig_index);
  int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
+ __be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev);
  void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
  int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
                        struct mlx5_odp_caps *odp_caps);
@@@ -1227,6 -1238,18 +1228,18 @@@ static inline u16 mlx5_core_max_vfs(con
        return dev->priv.sriov.max_vfs;
  }
  
+ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
+ {
+       /* LACP owner conditions:
+        * 1) Function is physical.
+        * 2) LAG is supported by FW.
+        * 3) LAG is managed by driver (currently the only option).
+        */
+       return  MLX5_CAP_GEN(dev, vport_group_manager) &&
+                  (MLX5_CAP_GEN(dev, num_lag_ports) > 1) &&
+                   MLX5_CAP_GEN(dev, lag_master);
+ }
  static inline int mlx5_get_gid_table_len(u16 param)
  {
        if (param > 4) {
diff --combined include/net/mana/mana.h
index 1512bd48df81febbf6f991be5bd1501228350ff6,9eef199728454ba03eba291f5e1e61f8399ef57e..024ad8ddb27e5fcd3b3a8fd44aace297cffedc20
@@@ -347,10 -347,8 +347,8 @@@ struct mana_tx_qp 
  struct mana_ethtool_stats {
        u64 stop_queue;
        u64 wake_queue;
-       u64 tx_cqes;
        u64 tx_cqe_err;
        u64 tx_cqe_unknown_type;
-       u64 rx_cqes;
        u64 rx_coalesced_err;
        u64 rx_cqe_unknown_type;
  };
@@@ -581,7 -579,7 +579,7 @@@ struct mana_fence_rq_resp 
  }; /* HW DATA */
  
  /* Configure vPort Rx Steering */
 -struct mana_cfg_rx_steer_req {
 +struct mana_cfg_rx_steer_req_v2 {
        struct gdma_req_hdr hdr;
        mana_handle_t vport;
        u16 num_indir_entries;
        u8 reserved;
        mana_handle_t default_rxobj;
        u8 hashkey[MANA_HASH_KEY_SIZE];
 +      u8 cqe_coalescing_enable;
 +      u8 reserved2[7];
  }; /* HW DATA */
  
  struct mana_cfg_rx_steer_resp {