Merge branch 'mlx5-next' into rdma.git
author    Jason Gunthorpe <[email protected]>  Thu, 20 Dec 2018 20:24:50 +0000 (13:24 -0700)
committer Jason Gunthorpe <[email protected]>  Thu, 20 Dec 2018 20:24:50 +0000 (13:24 -0700)
From git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

mlx5 updates taken for dependencies on the following patches.

* branch 'mlx5-next': (23 commits)
  IB/mlx5: Introduce uid as part of alloc/dealloc transport domain
  net/mlx5: Add shared Q counter bits
  net/mlx5: Continue driver initialization despite debugfs failure
  net/mlx5: Fold the modify lag code into function
  net/mlx5: Add lag affinity info to log
  net/mlx5: Split the activate lag function into two routines
  net/mlx5: E-Switch, Introduce flow counter affinity
  IB/mlx5: Unify e-switch representors load approach between uplink and VFs
  net/mlx5: Use lowercase 'X' for hex values
  net/mlx5: Remove duplicated include from eswitch.c
  net/mlx5: Remove the get protocol device interface entry
  net/mlx5: Support extended destination format in flow steering command
  net/mlx5: E-Switch, Change vhca id valid bool field to bit flag
  net/mlx5: Introduce extended destination fields
  net/mlx5: Revise gre and nvgre key formats
  net/mlx5: Add monitor commands layout and event data
  net/mlx5: Add support for plugged-disabled cable status in PME
  net/mlx5: Add support for PCIe power slot exceeded error in PME
  net/mlx5: Rework handling of port module events
  net/mlx5: Move flow counters data structures from flow steering header
  ...

drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
include/linux/mlx5/driver.h
include/linux/mlx5/mlx5_ifc.h

diff --combined drivers/infiniband/hw/mlx5/cq.c
index 95a29e85522ed9dbf300ff4064018e16349126aa,26ab9041f94aa81c35a3e777e1a5e32e2ae991fc..90f1b0bae5b5b2ff01643fc73ae00e7dddc67fb3
@@@ -82,7 -82,7 +82,7 @@@ static void *get_sw_cqe(struct mlx5_ib_
  
        cqe64 = (cq->mcq.cqe_sz == 64) ? cqe : cqe + 64;
  
-       if (likely((cqe64->op_own) >> 4 != MLX5_CQE_INVALID) &&
+       if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) &&
            !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & (cq->ibcq.cqe + 1)))) {
                return cqe;
        } else {
@@@ -197,7 -197,7 +197,7 @@@ static void handle_responder(struct ib_
        }
        wc->byte_len = be32_to_cpu(cqe->byte_cnt);
  
-       switch (cqe->op_own >> 4) {
+       switch (get_cqe_opcode(cqe)) {
        case MLX5_CQE_RESP_WR_IMM:
                wc->opcode      = IB_WC_RECV_RDMA_WITH_IMM;
                wc->wc_flags    = IB_WC_WITH_IMM;
@@@ -330,6 -330,67 +330,6 @@@ static void mlx5_handle_error_cqe(struc
                dump_cqe(dev, cqe);
  }
  
 -static int is_atomic_response(struct mlx5_ib_qp *qp, uint16_t idx)
 -{
 -      /* TBD: waiting decision
 -      */
 -      return 0;
 -}
 -
 -static void *mlx5_get_atomic_laddr(struct mlx5_ib_qp *qp, uint16_t idx)
 -{
 -      struct mlx5_wqe_data_seg *dpseg;
 -      void *addr;
 -
 -      dpseg = mlx5_get_send_wqe(qp, idx) + sizeof(struct mlx5_wqe_ctrl_seg) +
 -              sizeof(struct mlx5_wqe_raddr_seg) +
 -              sizeof(struct mlx5_wqe_atomic_seg);
 -      addr = (void *)(unsigned long)be64_to_cpu(dpseg->addr);
 -      return addr;
 -}
 -
 -static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 -                        uint16_t idx)
 -{
 -      void *addr;
 -      int byte_count;
 -      int i;
 -
 -      if (!is_atomic_response(qp, idx))
 -              return;
 -
 -      byte_count = be32_to_cpu(cqe64->byte_cnt);
 -      addr = mlx5_get_atomic_laddr(qp, idx);
 -
 -      if (byte_count == 4) {
 -              *(uint32_t *)addr = be32_to_cpu(*((__be32 *)addr));
 -      } else {
 -              for (i = 0; i < byte_count; i += 8) {
 -                      *(uint64_t *)addr = be64_to_cpu(*((__be64 *)addr));
 -                      addr += 8;
 -              }
 -      }
 -
 -      return;
 -}
 -
 -static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 -                         u16 tail, u16 head)
 -{
 -      u16 idx;
 -
 -      do {
 -              idx = tail & (qp->sq.wqe_cnt - 1);
 -              handle_atomic(qp, cqe64, idx);
 -              if (idx == head)
 -                      break;
 -
 -              tail = qp->sq.w_list[idx].next;
 -      } while (1);
 -      tail = qp->sq.w_list[idx].next;
 -      qp->sq.last_poll = tail;
 -}
 -
  static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
  {
        mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
@@@ -367,15 -428,45 +367,15 @@@ static void get_sig_err_item(struct mlx
        item->key = be32_to_cpu(cqe->mkey);
  }
  
 -static void sw_send_comp(struct mlx5_ib_qp *qp, int num_entries,
 -                       struct ib_wc *wc, int *npolled)
 -{
 -      struct mlx5_ib_wq *wq;
 -      unsigned int cur;
 -      unsigned int idx;
 -      int np;
 -      int i;
 -
 -      wq = &qp->sq;
 -      cur = wq->head - wq->tail;
 -      np = *npolled;
 -
 -      if (cur == 0)
 -              return;
 -
 -      for (i = 0;  i < cur && np < num_entries; i++) {
 -              idx = wq->last_poll & (wq->wqe_cnt - 1);
 -              wc->wr_id = wq->wrid[idx];
 -              wc->status = IB_WC_WR_FLUSH_ERR;
 -              wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
 -              wq->tail++;
 -              np++;
 -              wc->qp = &qp->ibqp;
 -              wc++;
 -              wq->last_poll = wq->w_list[idx].next;
 -      }
 -      *npolled = np;
 -}
 -
 -static void sw_recv_comp(struct mlx5_ib_qp *qp, int num_entries,
 -                       struct ib_wc *wc, int *npolled)
 +static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
 +                  int *npolled, int is_send)
  {
        struct mlx5_ib_wq *wq;
        unsigned int cur;
        int np;
        int i;
  
 -      wq = &qp->rq;
 +      wq = (is_send) ? &qp->sq : &qp->rq;
        cur = wq->head - wq->tail;
        np = *npolled;
  
@@@ -402,13 -493,13 +402,13 @@@ static void mlx5_ib_poll_sw_comp(struc
        *npolled = 0;
        /* Find uncompleted WQEs belonging to that cq and return mmics ones */
        list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
 -              sw_send_comp(qp, num_entries, wc + *npolled, npolled);
 +              sw_comp(qp, num_entries, wc + *npolled, npolled, true);
                if (*npolled >= num_entries)
                        return;
        }
  
        list_for_each_entry(qp, &cq->list_recv_qp, cq_recv_list) {
 -              sw_recv_comp(qp, num_entries, wc + *npolled, npolled);
 +              sw_comp(qp, num_entries, wc + *npolled, npolled, false);
                if (*npolled >= num_entries)
                        return;
        }
@@@ -446,7 -537,7 +446,7 @@@ repoll
         */
        rmb();
  
-       opcode = cqe64->op_own >> 4;
+       opcode = get_cqe_opcode(cqe64);
        if (unlikely(opcode == MLX5_CQE_RESIZE_CQ)) {
                if (likely(cq->resize_buf)) {
                        free_cq_buf(dev, &cq->buf);
                wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
                idx = wqe_ctr & (wq->wqe_cnt - 1);
                handle_good_req(wc, cqe64, wq, idx);
 -              handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
                wc->wr_id = wq->wrid[idx];
                wq->tail = wq->wqe_head[idx] + 1;
                wc->status = IB_WC_SUCCESS;
@@@ -1203,7 -1295,7 +1203,7 @@@ static int copy_resize_cqes(struct mlx5
                return -EINVAL;
        }
  
-       while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
+       while (get_cqe_opcode(scqe64) != MLX5_CQE_RESIZE_CQ) {
                dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
                                             (i + 1) & cq->resize_buf->nent);
                dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
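
Note on the cq.c hunks above: several of them replace the open-coded opcode extraction "cqe64->op_own >> 4" with a get_cqe_opcode() helper. The helper's definition is not part of this diff; a minimal sketch, assuming it simply gives the existing shift a name, would be:

static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe64)
{
	/* the CQE opcode lives in the upper 4 bits of op_own */
	return cqe64->op_own >> 4;
}

Naming the shift keeps every CQE consumer (cq.c here, en_rx.c further down) consistent should the op_own layout ever change.
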
diff --combined drivers/infiniband/hw/mlx5/main.c
index d7b56222fea376679aaa9057b96e2c693cfa7e2a,4a4200a88957d76d53a844435474af06cb1444c2..2a0526d7ff13ec7e6d043b37ce503ed556ed563f
@@@ -150,7 -150,7 +150,7 @@@ static int get_port_state(struct ib_dev
        int ret;
  
        memset(&attr, 0, sizeof(attr));
 -      ret = ibdev->query_port(ibdev, port_num, &attr);
 +      ret = ibdev->ops.query_port(ibdev, port_num, &attr);
        if (!ret)
                *state = attr.state;
        return ret;
@@@ -1018,9 -1018,6 +1018,9 @@@ static int mlx5_ib_query_device(struct 
  
                if (MLX5_CAP_GEN(mdev, cqe_128_always))
                        resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
 +              if (MLX5_CAP_GEN(mdev, qp_packet_based))
 +                      resp.flags |=
 +                              MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;
        }
  
        if (field_avail(typeof(resp), sw_parsing_caps,
@@@ -1101,28 -1098,31 +1101,28 @@@ enum mlx5_ib_width 
        MLX5_IB_WIDTH_12X       = 1 << 4
  };
  
 -static int translate_active_width(struct ib_device *ibdev, u8 active_width,
 +static void translate_active_width(struct ib_device *ibdev, u8 active_width,
                                  u8 *ib_width)
  {
        struct mlx5_ib_dev *dev = to_mdev(ibdev);
 -      int err = 0;
  
 -      if (active_width & MLX5_IB_WIDTH_1X) {
 +      if (active_width & MLX5_IB_WIDTH_1X)
                *ib_width = IB_WIDTH_1X;
 -      } else if (active_width & MLX5_IB_WIDTH_2X) {
 -              mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
 -                          (int)active_width);
 -              err = -EINVAL;
 -      } else if (active_width & MLX5_IB_WIDTH_4X) {
 +      else if (active_width & MLX5_IB_WIDTH_2X)
 +              *ib_width = IB_WIDTH_2X;
 +      else if (active_width & MLX5_IB_WIDTH_4X)
                *ib_width = IB_WIDTH_4X;
 -      } else if (active_width & MLX5_IB_WIDTH_8X) {
 +      else if (active_width & MLX5_IB_WIDTH_8X)
                *ib_width = IB_WIDTH_8X;
 -      } else if (active_width & MLX5_IB_WIDTH_12X) {
 +      else if (active_width & MLX5_IB_WIDTH_12X)
                *ib_width = IB_WIDTH_12X;
 -      else {
 -              mlx5_ib_dbg(dev, "Invalid active_width %d\n",
 +      else {
 +              mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
                            (int)active_width);
 -              err = -EINVAL;
 +              *ib_width = IB_WIDTH_4X;
        }
  
 -      return err;
 +      return;
  }
  
  static int mlx5_mtu_to_ib_mtu(int mtu)
@@@ -1225,15 -1225,14 +1225,15 @@@ static int mlx5_query_hca_port(struct i
        props->subnet_timeout   = rep->subnet_timeout;
        props->init_type_reply  = rep->init_type_reply;
  
 +      if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP)
 +              props->port_cap_flags2 = rep->cap_mask2;
 +
        err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
        if (err)
                goto out;
  
 -      err = translate_active_width(ibdev, ib_link_width_oper,
 -                                   &props->active_width);
 -      if (err)
 -              goto out;
 +      translate_active_width(ibdev, ib_link_width_oper, &props->active_width);
 +
        err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port);
        if (err)
                goto out;
@@@ -1764,7 -1763,7 +1764,7 @@@ static struct ib_ucontext *mlx5_ib_allo
  #endif
  
        if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
 -              err = mlx5_ib_devx_create(dev);
 +              err = mlx5_ib_devx_create(dev, true);
                if (err < 0)
                        goto out_uars;
                context->devx_uid = err;
@@@ -2681,11 -2680,11 +2681,11 @@@ static int parse_flow_attr(struct mlx5_
                         ntohs(ib_spec->gre.val.protocol));
  
                memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_c,
-                                   gre_key_h),
+                                   gre_key.nvgre.hi),
                       &ib_spec->gre.mask.key,
                       sizeof(ib_spec->gre.mask.key));
                memcpy(MLX5_ADDR_OF(fte_match_set_misc, misc_params_v,
-                                   gre_key_h),
+                                   gre_key.nvgre.hi),
                       &ib_spec->gre.val.key,
                       sizeof(ib_spec->gre.val.key));
                break;
@@@ -3718,8 -3717,7 +3718,8 @@@ _create_raw_flow_rule(struct mlx5_ib_de
                      struct mlx5_flow_destination *dst,
                      struct mlx5_ib_flow_matcher  *fs_matcher,
                      struct mlx5_flow_act *flow_act,
 -                    void *cmd_in, int inlen)
 +                    void *cmd_in, int inlen,
 +                    int dst_num)
  {
        struct mlx5_ib_flow_handler *handler;
        struct mlx5_flow_spec *spec;
        spec->match_criteria_enable = fs_matcher->match_criteria_enable;
  
        handler->rule = mlx5_add_flow_rules(ft, spec,
 -                                          flow_act, dst, 1);
 +                                          flow_act, dst, dst_num);
  
        if (IS_ERR(handler->rule)) {
                err = PTR_ERR(handler->rule);
@@@ -3804,14 -3802,12 +3804,14 @@@ struct mlx5_ib_flow_handler 
  mlx5_ib_raw_fs_rule_add(struct mlx5_ib_dev *dev,
                        struct mlx5_ib_flow_matcher *fs_matcher,
                        struct mlx5_flow_act *flow_act,
 +                      u32 counter_id,
                        void *cmd_in, int inlen, int dest_id,
                        int dest_type)
  {
        struct mlx5_flow_destination *dst;
        struct mlx5_ib_flow_prio *ft_prio;
        struct mlx5_ib_flow_handler *handler;
 +      int dst_num = 0;
        bool mcast;
        int err;
  
        if (fs_matcher->priority > MLX5_IB_FLOW_LAST_PRIO)
                return ERR_PTR(-ENOMEM);
  
 -      dst = kzalloc(sizeof(*dst), GFP_KERNEL);
 +      dst = kzalloc(sizeof(*dst) * 2, GFP_KERNEL);
        if (!dst)
                return ERR_PTR(-ENOMEM);
  
        }
  
        if (dest_type == MLX5_FLOW_DESTINATION_TYPE_TIR) {
 -              dst->type = dest_type;
 -              dst->tir_num = dest_id;
 +              dst[dst_num].type = dest_type;
 +              dst[dst_num].tir_num = dest_id;
                flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        } else if (dest_type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) {
 -              dst->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
 -              dst->ft_num = dest_id;
 +              dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM;
 +              dst[dst_num].ft_num = dest_id;
                flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        } else {
 -              dst->type = MLX5_FLOW_DESTINATION_TYPE_PORT;
 +              dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_PORT;
                flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_ALLOW;
        }
  
 +      dst_num++;
 +
 +      if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
 +              dst[dst_num].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
 +              dst[dst_num].counter_id = counter_id;
 +              dst_num++;
 +      }
 +
        handler = _create_raw_flow_rule(dev, ft_prio, dst, fs_matcher, flow_act,
 -                                      cmd_in, inlen);
 +                                      cmd_in, inlen, dst_num);
  
        if (IS_ERR(handler)) {
                err = PTR_ERR(handler);
@@@ -5398,6 -5386,14 +5398,6 @@@ static void init_delay_drop(struct mlx5
                mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
  }
  
 -static const struct cpumask *
 -mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 -{
 -      struct mlx5_ib_dev *dev = to_mdev(ibdev);
 -
 -      return mlx5_comp_irq_get_affinity_mask(dev->mdev, comp_vector);
 -}
 -
  /* The mlx5_ib_multiport_mutex should be held when calling this function */
  static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
                                      struct mlx5_ib_multiport_info *mpi)
@@@ -5625,17 -5621,30 +5625,17 @@@ ADD_UVERBS_ATTRIBUTES_SIMPLE
        UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_CREATE_FLOW_ACTION_FLAGS,
                             enum mlx5_ib_uapi_flow_action_flags));
  
 -static int populate_specs_root(struct mlx5_ib_dev *dev)
 -{
 -      const struct uverbs_object_tree_def **trees = dev->driver_trees;
 -      size_t num_trees = 0;
 -
 -      if (mlx5_accel_ipsec_device_caps(dev->mdev) &
 -          MLX5_ACCEL_IPSEC_CAP_DEVICE)
 -              trees[num_trees++] = &mlx5_ib_flow_action;
 -
 -      if (MLX5_CAP_DEV_MEM(dev->mdev, memic))
 -              trees[num_trees++] = &mlx5_ib_dm;
 -
 -      if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) &
 -          MLX5_GENERAL_OBJ_TYPES_CAP_UCTX)
 -              trees[num_trees++] = mlx5_ib_get_devx_tree();
 -
 -      num_trees += mlx5_ib_get_flow_trees(trees + num_trees);
 -
 -      WARN_ON(num_trees >= ARRAY_SIZE(dev->driver_trees));
 -      trees[num_trees] = NULL;
 -      dev->ib_dev.driver_specs = trees;
 +static const struct uapi_definition mlx5_ib_defs[] = {
 +#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 +      UAPI_DEF_CHAIN(mlx5_ib_devx_defs),
 +      UAPI_DEF_CHAIN(mlx5_ib_flow_defs),
 +#endif
  
 -      return 0;
 -}
 +      UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION,
 +                              &mlx5_ib_flow_action),
 +      UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
 +      {}
 +};
  
  static int mlx5_ib_read_counters(struct ib_counters *counters,
                                 struct ib_counters_read_attr *read_attr,
@@@ -5712,8 -5721,6 +5712,8 @@@ void mlx5_ib_stage_init_cleanup(struct 
        mlx5_ib_cleanup_multiport_master(dev);
  #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
        cleanup_srcu_struct(&dev->mr_srcu);
 +      drain_workqueue(dev->advise_mr_wq);
 +      destroy_workqueue(dev->advise_mr_wq);
  #endif
        kfree(dev->port);
  }
@@@ -5768,12 -5775,6 +5768,12 @@@ int mlx5_ib_stage_init_init(struct mlx5
        dev->memic.dev = mdev;
  
  #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
 +      dev->advise_mr_wq = alloc_ordered_workqueue("mlx5_ib_advise_mr_wq", 0);
 +      if (!dev->advise_mr_wq) {
 +              err = -ENOMEM;
 +              goto err_free_port;
 +      }
 +
        err = init_srcu_struct(&dev->mr_srcu);
        if (err)
                goto err_free_port;
@@@ -5820,94 -5821,6 +5820,94 @@@ static void mlx5_ib_stage_flow_db_clean
        kfree(dev->flow_db);
  }
  
 +static const struct ib_device_ops mlx5_ib_dev_ops = {
 +      .add_gid = mlx5_ib_add_gid,
 +      .alloc_mr = mlx5_ib_alloc_mr,
 +      .alloc_pd = mlx5_ib_alloc_pd,
 +      .alloc_ucontext = mlx5_ib_alloc_ucontext,
 +      .attach_mcast = mlx5_ib_mcg_attach,
 +      .check_mr_status = mlx5_ib_check_mr_status,
 +      .create_ah = mlx5_ib_create_ah,
 +      .create_counters = mlx5_ib_create_counters,
 +      .create_cq = mlx5_ib_create_cq,
 +      .create_flow = mlx5_ib_create_flow,
 +      .create_qp = mlx5_ib_create_qp,
 +      .create_srq = mlx5_ib_create_srq,
 +      .dealloc_pd = mlx5_ib_dealloc_pd,
 +      .dealloc_ucontext = mlx5_ib_dealloc_ucontext,
 +      .del_gid = mlx5_ib_del_gid,
 +      .dereg_mr = mlx5_ib_dereg_mr,
 +      .destroy_ah = mlx5_ib_destroy_ah,
 +      .destroy_counters = mlx5_ib_destroy_counters,
 +      .destroy_cq = mlx5_ib_destroy_cq,
 +      .destroy_flow = mlx5_ib_destroy_flow,
 +      .destroy_flow_action = mlx5_ib_destroy_flow_action,
 +      .destroy_qp = mlx5_ib_destroy_qp,
 +      .destroy_srq = mlx5_ib_destroy_srq,
 +      .detach_mcast = mlx5_ib_mcg_detach,
 +      .disassociate_ucontext = mlx5_ib_disassociate_ucontext,
 +      .drain_rq = mlx5_ib_drain_rq,
 +      .drain_sq = mlx5_ib_drain_sq,
 +      .get_dev_fw_str = get_dev_fw_str,
 +      .get_dma_mr = mlx5_ib_get_dma_mr,
 +      .get_link_layer = mlx5_ib_port_link_layer,
 +      .map_mr_sg = mlx5_ib_map_mr_sg,
 +      .mmap = mlx5_ib_mmap,
 +      .modify_cq = mlx5_ib_modify_cq,
 +      .modify_device = mlx5_ib_modify_device,
 +      .modify_port = mlx5_ib_modify_port,
 +      .modify_qp = mlx5_ib_modify_qp,
 +      .modify_srq = mlx5_ib_modify_srq,
 +      .poll_cq = mlx5_ib_poll_cq,
 +      .post_recv = mlx5_ib_post_recv,
 +      .post_send = mlx5_ib_post_send,
 +      .post_srq_recv = mlx5_ib_post_srq_recv,
 +      .process_mad = mlx5_ib_process_mad,
 +      .query_ah = mlx5_ib_query_ah,
 +      .query_device = mlx5_ib_query_device,
 +      .query_gid = mlx5_ib_query_gid,
 +      .query_pkey = mlx5_ib_query_pkey,
 +      .query_qp = mlx5_ib_query_qp,
 +      .query_srq = mlx5_ib_query_srq,
 +      .read_counters = mlx5_ib_read_counters,
 +      .reg_user_mr = mlx5_ib_reg_user_mr,
 +      .req_notify_cq = mlx5_ib_arm_cq,
 +      .rereg_user_mr = mlx5_ib_rereg_user_mr,
 +      .resize_cq = mlx5_ib_resize_cq,
 +};
 +
 +static const struct ib_device_ops mlx5_ib_dev_flow_ipsec_ops = {
 +      .create_flow_action_esp = mlx5_ib_create_flow_action_esp,
 +      .modify_flow_action_esp = mlx5_ib_modify_flow_action_esp,
 +};
 +
 +static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {
 +      .rdma_netdev_get_params = mlx5_ib_rn_get_params,
 +};
 +
 +static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
 +      .get_vf_config = mlx5_ib_get_vf_config,
 +      .get_vf_stats = mlx5_ib_get_vf_stats,
 +      .set_vf_guid = mlx5_ib_set_vf_guid,
 +      .set_vf_link_state = mlx5_ib_set_vf_link_state,
 +};
 +
 +static const struct ib_device_ops mlx5_ib_dev_mw_ops = {
 +      .alloc_mw = mlx5_ib_alloc_mw,
 +      .dealloc_mw = mlx5_ib_dealloc_mw,
 +};
 +
 +static const struct ib_device_ops mlx5_ib_dev_xrc_ops = {
 +      .alloc_xrcd = mlx5_ib_alloc_xrcd,
 +      .dealloc_xrcd = mlx5_ib_dealloc_xrcd,
 +};
 +
 +static const struct ib_device_ops mlx5_ib_dev_dm_ops = {
 +      .alloc_dm = mlx5_ib_alloc_dm,
 +      .dealloc_dm = mlx5_ib_dealloc_dm,
 +      .reg_dm_mr = mlx5_ib_reg_dm_mr,
 +};
 +
  int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
  {
        struct mlx5_core_dev *mdev = dev->mdev;
                (1ull << IB_USER_VERBS_EX_CMD_CREATE_CQ)        |
                (1ull << IB_USER_VERBS_EX_CMD_CREATE_QP)        |
                (1ull << IB_USER_VERBS_EX_CMD_MODIFY_QP)        |
 -              (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
 -
 -      dev->ib_dev.query_device        = mlx5_ib_query_device;
 -      dev->ib_dev.get_link_layer      = mlx5_ib_port_link_layer;
 -      dev->ib_dev.query_gid           = mlx5_ib_query_gid;
 -      dev->ib_dev.add_gid             = mlx5_ib_add_gid;
 -      dev->ib_dev.del_gid             = mlx5_ib_del_gid;
 -      dev->ib_dev.query_pkey          = mlx5_ib_query_pkey;
 -      dev->ib_dev.modify_device       = mlx5_ib_modify_device;
 -      dev->ib_dev.modify_port         = mlx5_ib_modify_port;
 -      dev->ib_dev.alloc_ucontext      = mlx5_ib_alloc_ucontext;
 -      dev->ib_dev.dealloc_ucontext    = mlx5_ib_dealloc_ucontext;
 -      dev->ib_dev.mmap                = mlx5_ib_mmap;
 -      dev->ib_dev.alloc_pd            = mlx5_ib_alloc_pd;
 -      dev->ib_dev.dealloc_pd          = mlx5_ib_dealloc_pd;
 -      dev->ib_dev.create_ah           = mlx5_ib_create_ah;
 -      dev->ib_dev.query_ah            = mlx5_ib_query_ah;
 -      dev->ib_dev.destroy_ah          = mlx5_ib_destroy_ah;
 -      dev->ib_dev.create_srq          = mlx5_ib_create_srq;
 -      dev->ib_dev.modify_srq          = mlx5_ib_modify_srq;
 -      dev->ib_dev.query_srq           = mlx5_ib_query_srq;
 -      dev->ib_dev.destroy_srq         = mlx5_ib_destroy_srq;
 -      dev->ib_dev.post_srq_recv       = mlx5_ib_post_srq_recv;
 -      dev->ib_dev.create_qp           = mlx5_ib_create_qp;
 -      dev->ib_dev.modify_qp           = mlx5_ib_modify_qp;
 -      dev->ib_dev.query_qp            = mlx5_ib_query_qp;
 -      dev->ib_dev.destroy_qp          = mlx5_ib_destroy_qp;
 -      dev->ib_dev.drain_sq            = mlx5_ib_drain_sq;
 -      dev->ib_dev.drain_rq            = mlx5_ib_drain_rq;
 -      dev->ib_dev.post_send           = mlx5_ib_post_send;
 -      dev->ib_dev.post_recv           = mlx5_ib_post_recv;
 -      dev->ib_dev.create_cq           = mlx5_ib_create_cq;
 -      dev->ib_dev.modify_cq           = mlx5_ib_modify_cq;
 -      dev->ib_dev.resize_cq           = mlx5_ib_resize_cq;
 -      dev->ib_dev.destroy_cq          = mlx5_ib_destroy_cq;
 -      dev->ib_dev.poll_cq             = mlx5_ib_poll_cq;
 -      dev->ib_dev.req_notify_cq       = mlx5_ib_arm_cq;
 -      dev->ib_dev.get_dma_mr          = mlx5_ib_get_dma_mr;
 -      dev->ib_dev.reg_user_mr         = mlx5_ib_reg_user_mr;
 -      dev->ib_dev.rereg_user_mr       = mlx5_ib_rereg_user_mr;
 -      dev->ib_dev.dereg_mr            = mlx5_ib_dereg_mr;
 -      dev->ib_dev.attach_mcast        = mlx5_ib_mcg_attach;
 -      dev->ib_dev.detach_mcast        = mlx5_ib_mcg_detach;
 -      dev->ib_dev.process_mad         = mlx5_ib_process_mad;
 -      dev->ib_dev.alloc_mr            = mlx5_ib_alloc_mr;
 -      dev->ib_dev.map_mr_sg           = mlx5_ib_map_mr_sg;
 -      dev->ib_dev.check_mr_status     = mlx5_ib_check_mr_status;
 -      dev->ib_dev.get_dev_fw_str      = get_dev_fw_str;
 -      dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
 +              (1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ)        |
 +              (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW)      |
 +              (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
 +
        if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
            IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
 -              dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params;
 +              ib_set_device_ops(&dev->ib_dev,
 +                                &mlx5_ib_dev_ipoib_enhanced_ops);
  
 -      if (mlx5_core_is_pf(mdev)) {
 -              dev->ib_dev.get_vf_config       = mlx5_ib_get_vf_config;
 -              dev->ib_dev.set_vf_link_state   = mlx5_ib_set_vf_link_state;
 -              dev->ib_dev.get_vf_stats        = mlx5_ib_get_vf_stats;
 -              dev->ib_dev.set_vf_guid         = mlx5_ib_set_vf_guid;
 -      }
 -
 -      dev->ib_dev.disassociate_ucontext = mlx5_ib_disassociate_ucontext;
 +      if (mlx5_core_is_pf(mdev))
 +              ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops);
  
        dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
  
        if (MLX5_CAP_GEN(mdev, imaicl)) {
 -              dev->ib_dev.alloc_mw            = mlx5_ib_alloc_mw;
 -              dev->ib_dev.dealloc_mw          = mlx5_ib_dealloc_mw;
                dev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_ALLOC_MW)    |
                        (1ull << IB_USER_VERBS_CMD_DEALLOC_MW);
 +              ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops);
        }
  
        if (MLX5_CAP_GEN(mdev, xrc)) {
 -              dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
 -              dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
                dev->ib_dev.uverbs_cmd_mask |=
                        (1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
                        (1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
 +              ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops);
        }
  
 -      if (MLX5_CAP_DEV_MEM(mdev, memic)) {
 -              dev->ib_dev.alloc_dm = mlx5_ib_alloc_dm;
 -              dev->ib_dev.dealloc_dm = mlx5_ib_dealloc_dm;
 -              dev->ib_dev.reg_dm_mr = mlx5_ib_reg_dm_mr;
 -      }
 +      if (MLX5_CAP_DEV_MEM(mdev, memic))
 +              ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops);
  
 -      dev->ib_dev.create_flow = mlx5_ib_create_flow;
 -      dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
 -      dev->ib_dev.uverbs_ex_cmd_mask |=
 -                      (1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
 -                      (1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
 -      dev->ib_dev.create_flow_action_esp = mlx5_ib_create_flow_action_esp;
 -      dev->ib_dev.destroy_flow_action = mlx5_ib_destroy_flow_action;
 -      dev->ib_dev.modify_flow_action_esp = mlx5_ib_modify_flow_action_esp;
 +      if (mlx5_accel_ipsec_device_caps(dev->mdev) &
 +          MLX5_ACCEL_IPSEC_CAP_DEVICE)
 +              ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_flow_ipsec_ops);
        dev->ib_dev.driver_id = RDMA_DRIVER_MLX5;
 -      dev->ib_dev.create_counters = mlx5_ib_create_counters;
 -      dev->ib_dev.destroy_counters = mlx5_ib_destroy_counters;
 -      dev->ib_dev.read_counters = mlx5_ib_read_counters;
 +      ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops);
 +
 +      if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
 +              dev->ib_dev.driver_def = mlx5_ib_defs;
  
        err = init_node_data(dev);
        if (err)
        return 0;
  }
  
 +static const struct ib_device_ops mlx5_ib_dev_port_ops = {
 +      .get_port_immutable = mlx5_port_immutable,
 +      .query_port = mlx5_ib_query_port,
 +};
 +
  static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
  {
 -      dev->ib_dev.get_port_immutable  = mlx5_port_immutable;
 -      dev->ib_dev.query_port          = mlx5_ib_query_port;
 -
 +      ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops);
        return 0;
  }
  
 +static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
 +      .get_port_immutable = mlx5_port_rep_immutable,
 +      .query_port = mlx5_ib_rep_query_port,
 +};
 +
  int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
  {
 -      dev->ib_dev.get_port_immutable  = mlx5_port_rep_immutable;
 -      dev->ib_dev.query_port          = mlx5_ib_rep_query_port;
 -
 +      ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
        return 0;
  }
  
 +static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = {
 +      .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table,
 +      .create_wq = mlx5_ib_create_wq,
 +      .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table,
 +      .destroy_wq = mlx5_ib_destroy_wq,
 +      .get_netdev = mlx5_ib_get_netdev,
 +      .modify_wq = mlx5_ib_modify_wq,
 +};
 +
  static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev)
  {
        u8 port_num;
                dev->roce[i].last_port_state = IB_PORT_DOWN;
        }
  
 -      dev->ib_dev.get_netdev  = mlx5_ib_get_netdev;
 -      dev->ib_dev.create_wq    = mlx5_ib_create_wq;
 -      dev->ib_dev.modify_wq    = mlx5_ib_modify_wq;
 -      dev->ib_dev.destroy_wq   = mlx5_ib_destroy_wq;
 -      dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
 -      dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
 -
        dev->ib_dev.uverbs_ex_cmd_mask |=
                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
                        (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
                        (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
                        (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
 +      ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops);
  
        port_num = mlx5_core_native_port_num(dev->mdev) - 1;
  
@@@ -6145,15 -6108,11 +6145,15 @@@ void mlx5_ib_stage_odp_cleanup(struct m
        mlx5_ib_odp_cleanup_one(dev);
  }
  
 +static const struct ib_device_ops mlx5_ib_dev_hw_stats_ops = {
 +      .alloc_hw_stats = mlx5_ib_alloc_hw_stats,
 +      .get_hw_stats = mlx5_ib_get_hw_stats,
 +};
 +
  int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
  {
        if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
 -              dev->ib_dev.get_hw_stats        = mlx5_ib_get_hw_stats;
 -              dev->ib_dev.alloc_hw_stats      = mlx5_ib_alloc_hw_stats;
 +              ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_hw_stats_ops);
  
                return mlx5_ib_alloc_counters(dev);
        }
@@@ -6211,6 -6170,11 +6211,6 @@@ void mlx5_ib_stage_bfrag_cleanup(struc
        mlx5_free_bfreg(dev->mdev, &dev->bfreg);
  }
  
 -static int mlx5_ib_stage_populate_specs(struct mlx5_ib_dev *dev)
 -{
 -      return populate_specs_root(dev);
 -}
 -
  int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
  {
        const char *name;
@@@ -6250,18 -6214,6 +6250,6 @@@ static void mlx5_ib_stage_delay_drop_cl
        cancel_delay_drop(dev);
  }
  
- static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
- {
-       mlx5_ib_register_vport_reps(dev);
-       return 0;
- }
- static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
- {
-       mlx5_ib_unregister_vport_reps(dev);
- }
  static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev)
  {
        dev->mdev_events.notifier_call = mlx5_ib_event;
@@@ -6278,7 -6230,7 +6266,7 @@@ static int mlx5_ib_stage_devx_init(stru
  {
        int uid;
  
 -      uid = mlx5_ib_devx_create(dev);
 +      uid = mlx5_ib_devx_create(dev, false);
        if (uid > 0)
                dev->devx_whitelist_uid = uid;
  
@@@ -6300,8 -6252,6 +6288,6 @@@ void __mlx5_ib_remove(struct mlx5_ib_de
                if (profile->stage[stage].cleanup)
                        profile->stage[stage].cleanup(dev);
        }
-       ib_dealloc_device((struct ib_device *)dev);
  }
  
  void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
@@@ -6372,6 -6322,9 +6358,6 @@@ static const struct mlx5_ib_profile pf_
        STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
                     NULL,
                     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 -      STAGE_CREATE(MLX5_IB_STAGE_SPECS,
 -                   mlx5_ib_stage_populate_specs,
 -                   NULL),
        STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID,
                     mlx5_ib_stage_devx_init,
                     mlx5_ib_stage_devx_cleanup),
@@@ -6423,15 -6376,15 +6409,12 @@@ static const struct mlx5_ib_profile nic
        STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR,
                     NULL,
                     mlx5_ib_stage_pre_ib_reg_umr_cleanup),
 -      STAGE_CREATE(MLX5_IB_STAGE_SPECS,
 -                   mlx5_ib_stage_populate_specs,
 -                   NULL),
        STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
                     mlx5_ib_stage_ib_reg_init,
                     mlx5_ib_stage_ib_reg_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR,
                     mlx5_ib_stage_post_ib_reg_umr_init,
                     NULL),
-       STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
-                    mlx5_ib_stage_rep_reg_init,
-                    mlx5_ib_stage_rep_reg_cleanup),
  };
  
  static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)
@@@ -6499,8 -6452,9 +6482,9 @@@ static void *mlx5_ib_add(struct mlx5_co
        if (MLX5_ESWITCH_MANAGER(mdev) &&
            mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
                dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
-               return __mlx5_ib_add(dev, &nic_rep_profile);
+               dev->profile = &nic_rep_profile;
+               mlx5_ib_register_vport_reps(dev);
+               return dev;
        }
  
        return __mlx5_ib_add(dev, &pf_profile);
@@@ -6522,7 -6476,12 +6506,12 @@@ static void mlx5_ib_remove(struct mlx5_
        }
  
        dev = context;
-       __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+       if (dev->profile == &nic_rep_profile)
+               mlx5_ib_unregister_vport_reps(dev);
+       else
+               __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+       ib_dealloc_device((struct ib_device *)dev);
  }
  
  static struct mlx5_interface mlx5_ib_interface = {
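
The long main.c section above converts the run of "dev->ib_dev.<callback> = ..." assignments into const struct ib_device_ops tables registered through ib_set_device_ops(). That helper lives in the RDMA core and is not shown in this diff; as a rough mental model only (an assumption, not the core's actual code), it copies each callback the driver filled in into the device's ops, so a driver can compose several small tables (base ops, SR-IOV ops, MW ops, ...) instead of hand-assigning dozens of pointers:

/* illustrative sketch, needs <rdma/ib_verbs.h>; only two callbacks shown */
static void example_set_device_ops(struct ib_device *dev,
				   const struct ib_device_ops *ops)
{
	if (ops->query_port)
		dev->ops.query_port = ops->query_port;
	if (ops->query_device)
		dev->ops.query_device = ops->query_device;
	/* ... one such copy per member of struct ib_device_ops ... */
}

The "ibdev->query_port" to "ibdev->ops.query_port" change at the top of the file is the caller-side half of the same conversion.
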
diff --combined drivers/infiniband/hw/mlx5/mlx5_ib.h
index 96e8fa1109f5b7cae609e4c7740b8b268bd6a9a0,c89b3b44b22ea65c4ea4d49e639c94836a5a858e..d531f25a11055453c172144b42a0370a03f79a0e
@@@ -41,6 -41,7 +41,6 @@@
  #include <linux/mlx5/cq.h>
  #include <linux/mlx5/fs.h>
  #include <linux/mlx5/qp.h>
 -#include <linux/mlx5/fs.h>
  #include <linux/types.h>
  #include <linux/mlx5/transobj.h>
  #include <rdma/ib_user_verbs.h>
@@@ -257,7 -258,6 +257,7 @@@ enum mlx5_ib_rq_flags 
  };
  
  struct mlx5_ib_wq {
 +      struct mlx5_frag_buf_ctrl fbc;
        u64                    *wrid;
        u32                    *wr_data;
        struct wr_list         *w_list;
        unsigned                head;
        unsigned                tail;
        u16                     cur_post;
 -      u16                     last_poll;
 -      void                   *qend;
 +      void                    *cur_edge;
  };
  
  enum mlx5_ib_wq_flags {
@@@ -460,7 -461,6 +460,7 @@@ enum mlx5_ib_qp_flags 
        MLX5_IB_QP_UNDERLAY                     = 1 << 10,
        MLX5_IB_QP_PCI_WRITE_END_PADDING        = 1 << 11,
        MLX5_IB_QP_TUNNEL_OFFLOAD               = 1 << 12,
 +      MLX5_IB_QP_PACKET_BASED_CREDIT          = 1 << 13,
  };
  
  struct mlx5_umr_wr {
@@@ -524,7 -524,6 +524,7 @@@ struct mlx5_ib_srq 
        struct mlx5_core_srq    msrq;
        struct mlx5_frag_buf    buf;
        struct mlx5_db          db;
 +      struct mlx5_frag_buf_ctrl fbc;
        u64                    *wrid;
        /* protect SRQ hanlding
         */
  struct mlx5_ib_xrcd {
        struct ib_xrcd          ibxrcd;
        u32                     xrcdn;
 -      u16                     uid;
  };
  
  enum mlx5_ib_mtt_access_flags {
@@@ -784,12 -784,12 +784,11 @@@ enum mlx5_ib_stages 
        MLX5_IB_STAGE_UAR,
        MLX5_IB_STAGE_BFREG,
        MLX5_IB_STAGE_PRE_IB_REG_UMR,
 -      MLX5_IB_STAGE_SPECS,
        MLX5_IB_STAGE_WHITELIST_UID,
        MLX5_IB_STAGE_IB_REG,
        MLX5_IB_STAGE_POST_IB_REG_UMR,
        MLX5_IB_STAGE_DELAY_DROP,
        MLX5_IB_STAGE_CLASS_ATTR,
-       MLX5_IB_STAGE_REP_REG,
        MLX5_IB_STAGE_MAX,
  };
  
@@@ -895,6 -895,7 +894,6 @@@ struct mlx5_ib_pf_eq 
  
  struct mlx5_ib_dev {
        struct ib_device                ib_dev;
 -      const struct uverbs_object_tree_def *driver_trees[7];
        struct mlx5_core_dev            *mdev;
        struct notifier_block           mdev_events;
        struct mlx5_roce                roce[MLX5_MAX_PORTS];
         */
        struct srcu_struct      mr_srcu;
        u32                     null_mkey;
 +      struct workqueue_struct *advise_mr_wq;
  #endif
        struct mlx5_ib_flow_db  *flow_db;
        /* protect resources needed as part of reset flow */
@@@ -1042,9 -1042,9 +1041,9 @@@ int mlx5_MAD_IFC(struct mlx5_ib_dev *de
                 u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                 const void *in_mad, void *response_mad);
  struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
 -                              struct ib_udata *udata);
 +                              u32 flags, struct ib_udata *udata);
  int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
 -int mlx5_ib_destroy_ah(struct ib_ah *ah);
 +int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags);
  struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
                                  struct ib_srq_init_attr *init_attr,
                                  struct ib_udata *udata);
@@@ -1070,6 -1070,7 +1069,6 @@@ int mlx5_ib_post_send(struct ib_qp *ibq
                      const struct ib_send_wr **bad_wr);
  int mlx5_ib_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
                      const struct ib_recv_wr **bad_wr);
 -void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
  int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
                          void *buffer, u32 length,
                          struct mlx5_ib_qp_base *base);
@@@ -1086,12 -1087,6 +1085,12 @@@ struct ib_mr *mlx5_ib_get_dma_mr(struc
  struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata);
 +int mlx5_ib_advise_mr(struct ib_pd *pd,
 +                    enum ib_uverbs_advise_mr_advice advice,
 +                    u32 flags,
 +                    struct ib_sge *sg_list,
 +                    u32 num_sge,
 +                    struct uverbs_attr_bundle *attrs);
  struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
                               struct ib_udata *udata);
  int mlx5_ib_dealloc_mw(struct ib_mw *mw);
@@@ -1189,10 -1184,6 +1188,10 @@@ void mlx5_ib_invalidate_range(struct ib
  void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
  void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
                           size_t nentries, struct mlx5_ib_mr *mr, int flags);
 +
 +int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 +                             enum ib_uverbs_advise_mr_advice advice,
 +                             u32 flags, struct ib_sge *sg_list, u32 num_sge);
  #else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
  static inline void mlx5_ib_internal_fill_odp_caps(struct mlx5_ib_dev *dev)
  {
@@@ -1208,13 -1199,6 +1207,13 @@@ static inline void mlx5_odp_populate_kl
                                         size_t nentries, struct mlx5_ib_mr *mr,
                                         int flags) {}
  
 +static inline int
 +mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
 +                         enum ib_uverbs_advise_mr_advice advice, u32 flags,
 +                         struct ib_sge *sg_list, u32 num_sge)
 +{
 +      return -EOPNOTSUPP;
 +}
  #endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
  
  /* Needed for rep profile */
@@@ -1283,29 -1267,32 +1282,29 @@@ void mlx5_ib_put_native_port_mdev(struc
                                  u8 port_num);
  
  #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
 -int mlx5_ib_devx_create(struct mlx5_ib_dev *dev);
 +int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user);
  void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid);
  const struct uverbs_object_tree_def *mlx5_ib_get_devx_tree(void);
 +extern const struct uapi_definition mlx5_ib_devx_defs[];
 +extern const struct uapi_definition mlx5_ib_flow_defs[];
  struct mlx5_ib_flow_handler *mlx5_ib_raw_fs_rule_add(
        struct mlx5_ib_dev *dev, struct mlx5_ib_flow_matcher *fs_matcher,
 -      struct mlx5_flow_act *flow_act, void *cmd_in, int inlen,
 -      int dest_id, int dest_type);
 +      struct mlx5_flow_act *flow_act, u32 counter_id,
 +      void *cmd_in, int inlen, int dest_id, int dest_type);
  bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type);
 +bool mlx5_ib_devx_is_flow_counter(void *obj, u32 *counter_id);
  int mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root);
  void mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction);
  #else
  static inline int
 -mlx5_ib_devx_create(struct mlx5_ib_dev *dev) { return -EOPNOTSUPP; };
 +mlx5_ib_devx_create(struct mlx5_ib_dev *dev,
 +                         bool is_user) { return -EOPNOTSUPP; }
  static inline void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid) {}
 -static inline const struct uverbs_object_tree_def *
 -mlx5_ib_get_devx_tree(void) { return NULL; }
  static inline bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id,
                                             int *dest_type)
  {
        return false;
  }
 -static inline int
 -mlx5_ib_get_flow_trees(const struct uverbs_object_tree_def **root)
 -{
 -      return 0;
 -}
  static inline void
  mlx5_ib_destroy_flow_action_raw(struct mlx5_ib_flow_action *maction)
  {
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 138c7679eebba08630f1f1df958e95fa0cf881e7,a43092de3cc08ab6f7bd5be5a88950cf7e84d069..e14d383947626235797d91dbf149f317c938618d
@@@ -505,7 -505,6 +505,7 @@@ static int mlx5e_alloc_rq(struct mlx5e_
        rq->channel = c;
        rq->ix      = c->ix;
        rq->mdev    = mdev;
 +      rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
        rq->stats   = &c->priv->channel_stats[c->ix].rq;
  
        rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
@@@ -1627,15 -1626,13 +1627,15 @@@ static int mlx5e_alloc_cq_common(struc
        int err;
        u32 i;
  
 +      err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
 +      if (err)
 +              return err;
 +
        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;
  
 -      mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
 -
        mcq->cqe_sz     = 64;
        mcq->set_ci_db  = cq->wq_ctrl.db.db;
        mcq->arm_db     = cq->wq_ctrl.db.db + 1;
@@@ -1693,10 -1690,6 +1693,10 @@@ static int mlx5e_create_cq(struct mlx5e
        int eqn;
        int err;
  
 +      err = mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
 +      if (err)
 +              return err;
 +
        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
  
 -      mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
 -
        MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
@@@ -1924,10 -1919,6 +1924,10 @@@ static int mlx5e_open_channel(struct ml
        int err;
        int eqn;
  
 +      err = mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
 +      if (err)
 +              return err;
 +
        c = kvzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;
        c->xdp      = !!params->xdp_prog;
        c->stats    = &priv->channel_stats[ix].ch;
  
 -      mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
        c->irq_desc = irq_to_desc(irq);
  
        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
@@@ -3580,7 -3572,6 +3580,7 @@@ static int set_feature_cvlan_filter(str
        return 0;
  }
  
 +#ifdef CONFIG_MLX5_ESWITCH
  static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
  {
        struct mlx5e_priv *priv = netdev_priv(netdev);
  
        return 0;
  }
 +#endif
  
  static int set_feature_rx_all(struct net_device *netdev, bool enable)
  {
@@@ -3692,9 -3682,7 +3692,9 @@@ static int mlx5e_set_features(struct ne
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
                                    set_feature_cvlan_filter);
 +#ifdef CONFIG_MLX5_ESWITCH
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_tc_num_filters);
 +#endif
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
        err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
@@@ -3765,11 -3753,10 +3765,11 @@@ int mlx5e_change_mtu(struct net_device 
        }
  
        if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
 +              bool is_linear = mlx5e_rx_mpwqe_is_linear_skb(priv->mdev, &new_channels.params);
                u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params);
                u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params);
  
 -              reset = reset && (ppw_old != ppw_new);
 +              reset = reset && (is_linear || (ppw_old != ppw_new));
        }
  
        if (!reset) {
@@@ -4689,9 -4676,7 +4689,9 @@@ static void mlx5e_build_nic_netdev(stru
            FT_CAP(modify_root) &&
            FT_CAP(identified_miss_table_mode) &&
            FT_CAP(flow_table_modify)) {
 +#ifdef CONFIG_MLX5_ESWITCH
                netdev->hw_features      |= NETIF_F_HW_TC;
 +#endif
  #ifdef CONFIG_MLX5_EN_ARFS
                netdev->hw_features      |= NETIF_F_NTUPLE;
  #endif
@@@ -5017,21 -5002,11 +5017,21 @@@ err_free_netdev
  int mlx5e_attach_netdev(struct mlx5e_priv *priv)
  {
        const struct mlx5e_profile *profile;
 +      int max_nch;
        int err;
  
        profile = priv->profile;
        clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
  
 +      /* max number of channels may have changed */
 +      max_nch = mlx5e_get_max_num_channels(priv->mdev);
 +      if (priv->channels.params.num_channels > max_nch) {
 +              mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch);
 +              priv->channels.params.num_channels = max_nch;
 +              mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt,
 +                                            MLX5E_INDIR_RQT_SIZE, max_nch);
 +      }
 +
        err = profile->init_tx(priv);
        if (err)
                goto out;
@@@ -5185,20 -5160,12 +5185,12 @@@ static void mlx5e_remove(struct mlx5_co
        kfree(ppriv);
  }
  
- static void *mlx5e_get_netdev(void *vpriv)
- {
-       struct mlx5e_priv *priv = vpriv;
-       return priv->netdev;
- }
  static struct mlx5_interface mlx5e_interface = {
        .add       = mlx5e_add,
        .remove    = mlx5e_remove,
        .attach    = mlx5e_attach,
        .detach    = mlx5e_detach,
        .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
-       .get_dev   = mlx5e_get_netdev,
  };
  
  void mlx5e_init(void)
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 624eed345b5d2b19fa5ed54935667b41090383f8,31956ddd394ee71433b09981877498a39eaaf7d2..a75aad03559368381841f1389a4b0d1f42f90d74
@@@ -554,9 -554,9 +554,9 @@@ static inline void mlx5e_poll_ico_singl
  
        mlx5_cqwq_pop(&cq->wq);
  
-       if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+       if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
                netdev_WARN_ONCE(cq->channel->netdev,
-                                "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own);
+                                "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
                return;
        }
  
@@@ -724,9 -724,9 +724,9 @@@ static u32 mlx5e_get_fcs(const struct s
        return __get_unaligned_cpu32(fcs_bytes);
  }
  
 -static u8 get_ip_proto(struct sk_buff *skb, __be16 proto)
 +static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
  {
 -      void *ip_p = skb->data + sizeof(struct ethhdr);
 +      void *ip_p = skb->data + network_depth;
  
        return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
                                            ((struct ipv6hdr *)ip_p)->nexthdr;
@@@ -755,7 -755,7 +755,7 @@@ static inline void mlx5e_handle_csum(st
                goto csum_unnecessary;
  
        if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
 -              if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
 +              if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
                        goto csum_unnecessary;
  
                skb->ip_summed = CHECKSUM_COMPLETE;
@@@ -898,7 -898,7 +898,7 @@@ mlx5e_skb_from_cqe_linear(struct mlx5e_
        prefetchw(va); /* xdp_frame data area */
        prefetch(data);
  
-       if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+       if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                return NULL;
        }
@@@ -930,7 -930,7 +930,7 @@@ mlx5e_skb_from_cqe_nonlinear(struct mlx
        u16 byte_cnt     = cqe_bcnt - headlen;
        struct sk_buff *skb;
  
-       if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+       if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                return NULL;
        }
@@@ -1104,12 -1104,6 +1104,12 @@@ mlx5e_skb_from_cqe_mpwrq_linear(struct 
        u32 frag_size;
        bool consumed;
  
 +      /* Check packet size. Note LRO doesn't use linear SKB */
 +      if (unlikely(cqe_bcnt > rq->hw_mtu)) {
 +              rq->stats->oversize_pkts_sw_drop++;
 +              return NULL;
 +      }
 +
        va             = page_address(di->page) + head_offset;
        data           = va + rx_headroom;
        frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
@@@ -1154,7 -1148,7 +1154,7 @@@ void mlx5e_handle_rx_cqe_mpwrq(struct m
  
        wi->consumed_strides += cstrides;
  
-       if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+       if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) {
                rq->stats->wqe_err++;
                goto mpwrq_cqe_out;
        }
diff --combined drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 0b7f3ebb7b9db99fe03eda7a00588ad42023ce4b,881c54c12e1989f6febd0eba1adf2016ff951353..a0ee11c8add6794cf2f09e8688096398e9a36126
@@@ -84,7 -84,6 +84,7 @@@ static const struct counter_desc sw_sta
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) },
 +      { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
@@@ -163,7 -162,6 +163,7 @@@ void mlx5e_grp_sw_update_stats(struct m
                s->rx_wqe_err   += rq_stats->wqe_err;
                s->rx_mpwqe_filler_cqes    += rq_stats->mpwqe_filler_cqes;
                s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides;
 +              s->rx_oversize_pkts_sw_drop += rq_stats->oversize_pkts_sw_drop;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
@@@ -1089,13 -1087,13 +1089,13 @@@ static void mlx5e_grp_per_prio_update_s
  }
  
  static const struct counter_desc mlx5e_pme_status_desc[] = {
-       { "module_unplug", 8 },
+       { "module_unplug",       sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED },
  };
  
  static const struct counter_desc mlx5e_pme_error_desc[] = {
-       { "module_bus_stuck", 16 },       /* bus stuck (I2C or data shorted) */
-       { "module_high_temp", 48 },       /* high temperature */
-       { "module_bad_shorted", 56 },    /* bad or shorted cable/module */
+       { "module_bus_stuck",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK },
+       { "module_high_temp",    sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE },
+       { "module_bad_shorted",  sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE },
  };
  
  #define NUM_PME_STATUS_STATS          ARRAY_SIZE(mlx5e_pme_status_desc)
@@@ -1194,7 -1192,6 +1194,7 @@@ static const struct counter_desc rq_sta
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_cqes) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler_strides) },
 +      { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, oversize_pkts_sw_drop) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
        { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
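
The en_stats.c hunk above replaces the raw byte offsets (8, 16, 48, 56) in the port module event (PME) counter descriptors with sizeof(u64) * <event enum> expressions, so each descriptor is tied to the event it counts rather than to a magic number. A sketch of how such offset-based descriptors are typically consumed (the simplified layout and reader below are assumptions for illustration, not code from this diff):

struct pme_counter_desc {
	char   format[32];  /* ethtool string */
	size_t offset;      /* byte offset into a block of u64 counters */
};

static u64 read_pme_counter(const void *stats, const struct pme_counter_desc *desc)
{
	return *(const u64 *)((const char *)stats + desc->offset);
}

With the counters stored as an array of u64 indexed by the MLX5_MODULE_* enums, sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK is exactly the offset of that event's slot.
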
diff --combined include/linux/mlx5/driver.h
index b090a96f87dff2ae74613121c50bfe23d5b34604,cc29e880c7336268373730bf62164430f4cb32c2..cb1c5b10279cf9fd3df97fb16c0a29728620e33d
@@@ -749,8 -749,8 +749,8 @@@ struct mlx5_hca_vport_context 
        u64                     node_guid;
        u32                     cap_mask1;
        u32                     cap_mask1_perm;
 -      u32                     cap_mask2;
 -      u32                     cap_mask2_perm;
 +      u16                     cap_mask2;
 +      u16                     cap_mask2_perm;
        u16                     lid;
        u8                      init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
        u8                      lmc;
@@@ -1004,12 -1004,10 +1004,10 @@@ struct mlx5_interface 
        void                    (*remove)(struct mlx5_core_dev *dev, void *context);
        int                     (*attach)(struct mlx5_core_dev *dev, void *context);
        void                    (*detach)(struct mlx5_core_dev *dev, void *context);
-       void *                  (*get_dev)(void *context);
        int                     protocol;
        struct list_head        list;
  };
  
- void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol);
  int mlx5_register_interface(struct mlx5_interface *intf);
  void mlx5_unregister_interface(struct mlx5_interface *intf);
  int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
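
The mlx5_ifc.h hunks below follow that header's convention in which "u8 field[0xN]" declares an N-bit field of a firmware layout, not a byte array; such fields are accessed through the MLX5_SET/MLX5_GET/MLX5_ADDR_OF macros rather than dereferenced directly. A minimal sketch using the gre_key union introduced in this merge (the key value is invented for illustration):

/* needs <linux/mlx5/device.h> and <linux/mlx5/mlx5_ifc.h> */
static void example_set_nvgre_key_hi(void *misc_params_v)
{
	/* hi is the 24-bit high part of the (NV)GRE key */
	MLX5_SET(fte_match_set_misc, misc_params_v, gre_key.nvgre.hi, 0x123456);
}

This convention is why the main.c flow steering code above could move from the old gre_key_h field to gre_key.nvgre.hi by changing only the MLX5_ADDR_OF() argument.
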
diff --combined include/linux/mlx5/mlx5_ifc.h
index 52393fbcf3b402c3ec7be691d3f4e57af1495d51,5699c6bad59047a12fdbdf06e8296c29d7da9b60..0bca5a6387e965e69c25817465de76356a1afff0
@@@ -85,6 -85,10 +85,10 @@@ enum 
        MLX5_OBJ_TYPE_UMEM = 0x0005,
  };
  
+ enum {
+       MLX5_SHARED_RESOURCE_UID = 0xffff,
+ };
  enum {
        MLX5_CMD_OP_QUERY_HCA_CAP                 = 0x100,
        MLX5_CMD_OP_QUERY_ADAPTER                 = 0x101,
        MLX5_CMD_OP_DESTROY_XRQ                   = 0x718,
        MLX5_CMD_OP_QUERY_XRQ                     = 0x719,
        MLX5_CMD_OP_ARM_XRQ                       = 0x71a,
 +      MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY     = 0x725,
 +      MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY       = 0x726,
 +      MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS        = 0x727,
        MLX5_CMD_OP_QUERY_VPORT_STATE             = 0x750,
        MLX5_CMD_OP_MODIFY_VPORT_STATE            = 0x751,
        MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT       = 0x752,
        MLX5_CMD_OP_ALLOC_Q_COUNTER               = 0x771,
        MLX5_CMD_OP_DEALLOC_Q_COUNTER             = 0x772,
        MLX5_CMD_OP_QUERY_Q_COUNTER               = 0x773,
+       MLX5_CMD_OP_SET_MONITOR_COUNTER           = 0x774,
+       MLX5_CMD_OP_ARM_MONITOR_COUNTER           = 0x775,
        MLX5_CMD_OP_SET_PP_RATE_LIMIT             = 0x780,
        MLX5_CMD_OP_QUERY_RATE_LIMIT              = 0x781,
        MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT      = 0x782,
        MLX5_CMD_OP_MODIFY_FLOW_TABLE             = 0x93c,
        MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT = 0x93d,
        MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT = 0x93e,
 +      MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT = 0x93f,
        MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT   = 0x940,
        MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
        MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT   = 0x942,
        MLX5_CMD_OP_MAX
  };
  
 +/* Valid range for general commands that don't work over an object */
 +enum {
 +      MLX5_CMD_OP_GENERAL_START = 0xb00,
 +      MLX5_CMD_OP_GENERAL_END = 0xd00,
 +};
 +
  struct mlx5_ifc_flow_table_fields_supported_bits {
        u8         outer_dmac[0x1];
        u8         outer_smac[0x1];
@@@ -431,6 -427,16 +437,16 @@@ struct mlx5_ifc_fte_match_set_lyr_2_4_b
        union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits dst_ipv4_dst_ipv6;
  };
  
+ struct mlx5_ifc_nvgre_key_bits {
+       u8 hi[0x18];
+       u8 lo[0x8];
+ };
+ union mlx5_ifc_gre_key_bits {
+       struct mlx5_ifc_nvgre_key_bits nvgre;
+       u8 key[0x20];
+ };
  struct mlx5_ifc_fte_match_set_misc_bits {
        u8         reserved_at_0[0x8];
        u8         source_sqn[0x18];
        u8         reserved_at_64[0xc];
        u8         gre_protocol[0x10];
  
-       u8         gre_key_h[0x18];
-       u8         gre_key_l[0x8];
+       union mlx5_ifc_gre_key_bits gre_key;
  
        u8         vxlan_vni[0x18];
        u8         reserved_at_b8[0x8];
@@@ -607,20 -612,28 +622,28 @@@ struct mlx5_ifc_flow_table_eswitch_cap_
        u8      reserved_at_800[0x7800];
  };
  
+ enum {
+       MLX5_COUNTER_SOURCE_ESWITCH = 0x0,
+       MLX5_COUNTER_FLOW_ESWITCH   = 0x1,
+ };
  struct mlx5_ifc_e_switch_cap_bits {
        u8         vport_svlan_strip[0x1];
        u8         vport_cvlan_strip[0x1];
        u8         vport_svlan_insert[0x1];
        u8         vport_cvlan_insert_if_not_exist[0x1];
        u8         vport_cvlan_insert_overwrite[0x1];
-       u8         reserved_at_5[0x18];
+       u8         reserved_at_5[0x17];
+       u8         counter_eswitch_affinity[0x1];
        u8         merged_eswitch[0x1];
        u8         nic_vport_node_guid_modify[0x1];
        u8         nic_vport_port_guid_modify[0x1];
  
        u8         vxlan_encap_decap[0x1];
        u8         nvgre_encap_decap[0x1];
-       u8         reserved_at_22[0x9];
+       u8         reserved_at_22[0x1];
+       u8         log_max_fdb_encap_uplink[0x5];
+       u8         reserved_at_21[0x3];
        u8         log_max_packet_reformat_context[0x5];
        u8         reserved_2b[0x6];
        u8         max_encap_header_size[0xa];
@@@ -1210,7 -1223,13 +1233,13 @@@ struct mlx5_ifc_cmd_hca_cap_bits 
        u8         sw_owner_id[0x1];
        u8         reserved_at_61f[0x1];
  
-       u8         reserved_at_620[0x80];
+       u8         max_num_of_monitor_counters[0x10];
+       u8         num_ppcnt_monitor_counters[0x10];
+       u8         reserved_at_640[0x10];
+       u8         num_q_monitor_counters[0x10];
+       u8         reserved_at_660[0x40];
  
        u8         uctx_cap[0x20];
  
@@@ -1230,8 -1249,10 +1259,10 @@@ enum mlx5_flow_destination_type 
  struct mlx5_ifc_dest_format_struct_bits {
        u8         destination_type[0x8];
        u8         destination_id[0x18];
        u8         destination_eswitch_owner_vhca_id_valid[0x1];
-       u8         reserved_at_21[0xf];
+       u8         packet_reformat[0x1];
+       u8         reserved_at_22[0xe];
        u8         destination_eswitch_owner_vhca_id[0x10];
  };
  
@@@ -1241,6 -1262,14 +1272,14 @@@ struct mlx5_ifc_flow_counter_list_bits 
        u8         reserved_at_20[0x20];
  };
  
+ struct mlx5_ifc_extended_dest_format_bits {
+       struct mlx5_ifc_dest_format_struct_bits destination_entry;
+       u8         packet_reformat_id[0x20];
+       u8         reserved_at_60[0x20];
+ };
  union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits {
        struct mlx5_ifc_dest_format_struct_bits dest_format_struct;
        struct mlx5_ifc_flow_counter_list_bits flow_counter_list;
@@@ -2462,7 -2491,8 +2501,8 @@@ struct mlx5_ifc_flow_context_bits 
        u8         reserved_at_60[0x10];
        u8         action[0x10];
  
-       u8         reserved_at_80[0x8];
+       u8         extended_destination[0x1];
+       u8         reserved_at_80[0x7];
        u8         destination_list_size[0x18];
  
        u8         reserved_at_a0[0x8];
@@@ -3818,6 -3848,83 +3858,83 @@@ enum 
        MLX5_VPORT_STATE_OP_MOD_ESW_VPORT   = 0x1,
  };
  
+ struct mlx5_ifc_arm_monitor_counter_in_bits {
+       u8         opcode[0x10];
+       u8         uid[0x10];
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+       u8         reserved_at_40[0x20];
+       u8         reserved_at_60[0x20];
+ };
+ struct mlx5_ifc_arm_monitor_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+       u8         syndrome[0x20];
+       u8         reserved_at_40[0x40];
+ };
+ enum {
+       MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT     = 0x0,
+       MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER = 0x1,
+ };
+ enum mlx5_monitor_counter_ppcnt {
+       MLX5_QUERY_MONITOR_PPCNT_IN_RANGE_LENGTH_ERRORS      = 0x0,
+       MLX5_QUERY_MONITOR_PPCNT_OUT_OF_RANGE_LENGTH_FIELD   = 0x1,
+       MLX5_QUERY_MONITOR_PPCNT_FRAME_TOO_LONG_ERRORS       = 0x2,
+       MLX5_QUERY_MONITOR_PPCNT_FRAME_CHECK_SEQUENCE_ERRORS = 0x3,
+       MLX5_QUERY_MONITOR_PPCNT_ALIGNMENT_ERRORS            = 0x4,
+       MLX5_QUERY_MONITOR_PPCNT_IF_OUT_DISCARDS             = 0x5,
+ };
+ enum {
+       MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER     = 0x4,
+ };
+ struct mlx5_ifc_monitor_counter_output_bits {
+       u8         reserved_at_0[0x4];
+       u8         type[0x4];
+       u8         reserved_at_8[0x8];
+       u8         counter[0x10];
+       u8         counter_group_id[0x20];
+ };
+ #define MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 (6)
+ #define MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1    (1)
+ #define MLX5_CMD_SET_MONITOR_NUM_COUNTER (MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +\
+                                         MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1)
+ struct mlx5_ifc_set_monitor_counter_in_bits {
+       u8         opcode[0x10];
+       u8         uid[0x10];
+       u8         reserved_at_20[0x10];
+       u8         op_mod[0x10];
+       u8         reserved_at_40[0x10];
+       u8         num_of_counters[0x10];
+       u8         reserved_at_60[0x20];
+       struct mlx5_ifc_monitor_counter_output_bits monitor_counter[MLX5_CMD_SET_MONITOR_NUM_COUNTER];
+ };
+ struct mlx5_ifc_set_monitor_counter_out_bits {
+       u8         status[0x8];
+       u8         reserved_at_8[0x18];
+       u8         syndrome[0x20];
+       u8         reserved_at_40[0x40];
+ };
  struct mlx5_ifc_query_vport_state_in_bits {
        u8         opcode[0x10];
        u8         reserved_at_10[0x10];
@@@ -4683,7 -4790,7 +4800,7 @@@ enum 
        MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS    = 0x0,
        MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS  = 0x1,
        MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_INNER_HEADERS    = 0x2,
-       MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0X3,
+       MLX5_QUERY_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS_2 = 0x3,
  };
  
  struct mlx5_ifc_query_flow_group_out_bits {
@@@ -6589,7 -6696,7 +6706,7 @@@ struct mlx5_ifc_dealloc_transport_domai
  
  struct mlx5_ifc_dealloc_transport_domain_in_bits {
        u8         opcode[0x10];
-       u8         reserved_at_10[0x10];
+       u8         uid[0x10];
  
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
@@@ -7442,7 -7549,7 +7559,7 @@@ struct mlx5_ifc_alloc_transport_domain_
  
  struct mlx5_ifc_alloc_transport_domain_in_bits {
        u8         opcode[0x10];
-       u8         reserved_at_10[0x10];
+       u8         uid[0x10];
  
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
@@@ -7464,7 -7571,7 +7581,7 @@@ struct mlx5_ifc_alloc_q_counter_out_bit
  
  struct mlx5_ifc_alloc_q_counter_in_bits {
        u8         opcode[0x10];
-       u8         reserved_at_10[0x10];
+       u8         uid[0x10];
  
        u8         reserved_at_20[0x10];
        u8         op_mod[0x10];
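The uid fields introduced above (alloc/dealloc transport domain and alloc Q counter) let these commands carry the user context that owns the object. A minimal sketch of how a caller might fill the new field with the standard mlx5_ifc accessors follows; the wrapper function itself is hypothetical and the real driver call sites may differ:

    /* Sketch only: assumes the usual MLX5_SET/MLX5_GET/mlx5_cmd_exec helpers. */
    static int alloc_td_with_uid(struct mlx5_core_dev *dev, u16 uid, u32 *tdn)
    {
            u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
            u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
            int err;

            MLX5_SET(alloc_transport_domain_in, in, opcode,
                     MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
            MLX5_SET(alloc_transport_domain_in, in, uid, uid);

            err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
            if (!err)
                    *tdn = MLX5_GET(alloc_transport_domain_out, out,
                                    transport_domain);
            return err;
    }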