#include "uverbs.h"
---- -----static struct lock_class_key pd_lock_key;
---- -----static struct lock_class_key mr_lock_key;
---- -----static struct lock_class_key cq_lock_key;
---- -----static struct lock_class_key qp_lock_key;
---- -----static struct lock_class_key ah_lock_key;
---- -----static struct lock_class_key srq_lock_key;
---- -----static struct lock_class_key xrcd_lock_key;
++++ +++++struct uverbs_lock_class {
++++ +++++ struct lock_class_key key;
++++ +++++ char name[16];
++++ +++++};
++++ +++++
++++ +++++static struct uverbs_lock_class pd_lock_class = { .name = "PD-uobj" };
++++ +++++static struct uverbs_lock_class mr_lock_class = { .name = "MR-uobj" };
++++ +++++static struct uverbs_lock_class cq_lock_class = { .name = "CQ-uobj" };
++++ +++++static struct uverbs_lock_class qp_lock_class = { .name = "QP-uobj" };
++++ +++++static struct uverbs_lock_class ah_lock_class = { .name = "AH-uobj" };
++++ +++++static struct uverbs_lock_class srq_lock_class = { .name = "SRQ-uobj" };
++++ +++++static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
do { \
*/
/*
 * Initialize a freshly allocated userspace verbs object: record the
 * user-supplied handle and owning context, set up refcounting and the
 * per-object rwsem, and tag that rwsem with the per-type lockdep class
 * and name from @c so lock reports are self-describing.  The object
 * starts out not live (not yet visible in the idr).
 */
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	/* Named class keeps lockdep splats readable per uobject type. */
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live = 0;
}
if (!uobj)
return -ENOMEM;
---- ----- init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
++++ +++++ init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
down_write(&uobj->mutex);
pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
goto err_tree_mutex_unlock;
}
---- ----- init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_key);
++++ +++++ init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);
down_write(&obj->uobject.mutex);
if (!uobj)
return -ENOMEM;
---- ----- init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
++++ +++++ init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
down_write(&uobj->mutex);
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
if (!obj)
return -ENOMEM;
---- ----- init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
++++ +++++ init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
down_write(&obj->uobject.mutex);
if (cmd.comp_channel >= 0) {
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+++++++++ if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
+++++++++ return -EPERM;
+++++++++
INIT_UDATA(&udata, buf + sizeof cmd,
(unsigned long) cmd.response + sizeof resp,
in_len - sizeof cmd, out_len - sizeof resp);
if (!obj)
return -ENOMEM;
---- ----- init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
++++ +++++ init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
down_write(&obj->uevent.uobject.mutex);
if (cmd.qp_type == IB_QPT_XRC_TGT) {
}
device = xrcd->device;
} else {
---- ----- pd = idr_read_pd(cmd.pd_handle, file->ucontext);
---- ----- scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
---- ----- if (!pd || !scq) {
---- ----- ret = -EINVAL;
---- ----- goto err_put;
---- ----- }
---- -----
if (cmd.qp_type == IB_QPT_XRC_INI) {
cmd.max_recv_wr = cmd.max_recv_sge = 0;
} else {
goto err_put;
}
}
---- ----- rcq = (cmd.recv_cq_handle == cmd.send_cq_handle) ?
---- ----- scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
---- ----- if (!rcq) {
---- ----- ret = -EINVAL;
---- ----- goto err_put;
++++ +++++
++++ +++++ if (cmd.recv_cq_handle != cmd.send_cq_handle) {
++++ +++++ rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
++++ +++++ if (!rcq) {
++++ +++++ ret = -EINVAL;
++++ +++++ goto err_put;
++++ +++++ }
}
}
++++ +++++
++++ +++++ scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
++++ +++++ rcq = rcq ?: scq;
++++ +++++ pd = idr_read_pd(cmd.pd_handle, file->ucontext);
++++ +++++ if (!pd || !scq) {
++++ +++++ ret = -EINVAL;
++++ +++++ goto err_put;
++++ +++++ }
++++ +++++
device = pd->device;
}
if (!obj)
return -ENOMEM;
---- ----- init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
++++ +++++ init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
down_write(&obj->uevent.uobject.mutex);
xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
if (!uobj)
return -ENOMEM;
---- ----- init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
++++ +++++ init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
down_write(&uobj->mutex);
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
if (!obj)
return -ENOMEM;
---- ----- init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_key);
++++ +++++ init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
down_write(&obj->uevent.uobject.mutex);
---- ----- pd = idr_read_pd(cmd->pd_handle, file->ucontext);
---- ----- if (!pd) {
---- ----- ret = -EINVAL;
---- ----- goto err;
---- ----- }
---- -----
if (cmd->srq_type == IB_SRQT_XRC) {
---- ----- attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
---- ----- if (!attr.ext.xrc.cq) {
---- ----- ret = -EINVAL;
---- ----- goto err_put_pd;
---- ----- }
---- -----
attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
if (!attr.ext.xrc.xrcd) {
ret = -EINVAL;
---- ----- goto err_put_cq;
++++ +++++ goto err;
}
obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
atomic_inc(&obj->uxrcd->refcnt);
++++ +++++
++++ +++++ attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
++++ +++++ if (!attr.ext.xrc.cq) {
++++ +++++ ret = -EINVAL;
++++ +++++ goto err_put_xrcd;
++++ +++++ }
++++ +++++ }
++++ +++++
++++ +++++ pd = idr_read_pd(cmd->pd_handle, file->ucontext);
++++ +++++ if (!pd) {
++++ +++++ ret = -EINVAL;
++++ +++++ goto err_put_cq;
}
attr.event_handler = ib_uverbs_srq_event_handler;
ib_destroy_srq(srq);
err_put:
---- ----- if (cmd->srq_type == IB_SRQT_XRC) {
---- ----- atomic_dec(&obj->uxrcd->refcnt);
---- ----- put_uobj_read(xrcd_uobj);
---- ----- }
++++ +++++ put_pd_read(pd);
err_put_cq:
if (cmd->srq_type == IB_SRQT_XRC)
put_cq_read(attr.ext.xrc.cq);
---- -----err_put_pd:
---- ----- put_pd_read(pd);
++++ +++++err_put_xrcd:
++++ +++++ if (cmd->srq_type == IB_SRQT_XRC) {
++++ +++++ atomic_dec(&obj->uxrcd->refcnt);
++++ +++++ put_uobj_read(xrcd_uobj);
++++ +++++ }
err:
put_uobj_write(&obj->uevent.uobject);
MLX4_IB_CACHE_LINE_SIZE = 64,
};
/*
 * Raw packet QP hardware parameters: the MTU field value programmed
 * into the QP context and the log2 of the maximum message size.
 */
enum {
	MLX4_RAW_QP_MTU		= 7,
	MLX4_RAW_QP_MSGMAX	= 31,
};
+++++++++
static const __be32 mlx4_ib_opcode[] = {
[IB_WR_SEND] = cpu_to_be32(MLX4_OPCODE_SEND),
[IB_WR_LSO] = cpu_to_be32(MLX4_OPCODE_LSO),
event.event = IB_EVENT_QP_ACCESS_ERR;
break;
default:
----- ---- printk(KERN_WARNING "mlx4_ib: Unexpected event type %d "
+++++ ++++ pr_warn("Unexpected event type %d "
"on QP %06x\n", type, qp->qpn);
return;
}
if (sqpn) {
qpn = sqpn;
} else {
--------- err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
+++++++++ /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
+++++++++ * BlueFlame setup flow wrongly causes VLAN insertion. */
+++++++++ if (init_attr->qp_type == IB_QPT_RAW_PACKET)
+++++++++ err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+++++++++ else
+++++++++ err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn);
if (err)
goto err_wrid;
}
if (qp->state != IB_QPS_RESET)
if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
----- ---- printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n",
+++++ ++++ pr_warn("modify QP %06x to RESET failed.\n",
qp->mqp.qpn);
get_cqs(qp, &send_cq, &recv_cq);
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
+++++++++ case IB_QPT_RAW_PACKET:
{
qp = kzalloc(sizeof *qp, GFP_KERNEL);
if (!qp)
case IB_QPT_XRC_INI:
case IB_QPT_XRC_TGT: return MLX4_QP_ST_XRC;
case IB_QPT_SMI:
--------- case IB_QPT_GSI: return MLX4_QP_ST_MLX;
+++++++++ case IB_QPT_GSI:
+++++++++ case IB_QPT_RAW_PACKET: return MLX4_QP_ST_MLX;
default: return -1;
}
}
if (ah->ah_flags & IB_AH_GRH) {
if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
----- ---- printk(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+++++ ++++ pr_err("sgid_index (%u) too large. max is %d\n",
ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
return -1;
}
if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+++++++++ else if (ibqp->qp_type == IB_QPT_RAW_PACKET)
+++++++++ context->mtu_msgmax = (MLX4_RAW_QP_MTU << 5) | MLX4_RAW_QP_MSGMAX;
else if (ibqp->qp_type == IB_QPT_UD) {
if (qp->flags & MLX4_IB_QP_LSO)
context->mtu_msgmax = (IB_MTU_4096 << 5) |
context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
----- ---- printk(KERN_ERR "path MTU (%u) is invalid\n",
+++++ ++++ pr_err("path MTU (%u) is invalid\n",
attr->path_mtu);
goto out;
}
if (cur_state == IB_QPS_INIT &&
new_state == IB_QPS_RTR &&
(ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
--------- ibqp->qp_type == IB_QPT_UD)) {
+++++++++ ibqp->qp_type == IB_QPT_UD ||
+++++++++ ibqp->qp_type == IB_QPT_RAW_PACKET)) {
context->pri_path.sched_queue = (qp->port - 1) << 6;
if (is_qp0(dev, qp))
context->pri_path.sched_queue |= MLX4_IB_DEFAULT_QP0_SCHED_QUEUE;
if (is_qp0(dev, qp)) {
if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
if (mlx4_INIT_PORT(dev->dev, qp->port))
----- ---- printk(KERN_WARNING "INIT_PORT failed for port %d\n",
+++++ ++++ pr_warn("INIT_PORT failed for port %d\n",
qp->port);
if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
goto out;
}
+++++++++ if ((attr_mask & IB_QP_PORT) && (ibqp->qp_type == IB_QPT_RAW_PACKET) &&
+++++++++ (rdma_port_get_link_layer(&dev->ib_dev, attr->port_num) !=
+++++++++ IB_LINK_LAYER_ETHERNET))
+++++++++ goto out;
+++++++++
if (attr_mask & IB_QP_PKEY_INDEX) {
int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
if (attr->pkey_index >= dev->dev->caps.pkey_table_len[p])
if (is_eth) {
u8 *smac;
+++++ ++++ u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
+++++ ++++
+++++ ++++ mlx->sched_prio = cpu_to_be16(pcp);
memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
/* FIXME: cache smac value? */
if (!is_vlan) {
sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
} else {
----- ---- u16 pcp;
----- ----
sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
----- ---- pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
}
} else {
header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
if (0) {
----- ---- printk(KERN_ERR "built UD header of size %d:\n", header_size);
+++++ ++++ pr_err("built UD header of size %d:\n", header_size);
for (i = 0; i < header_size / 4; ++i) {
if (i % 8 == 0)
----- ---- printk(" [%02x] ", i * 4);
----- ---- printk(" %08x",
----- ---- be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
+++++ ++++ pr_err(" [%02x] ", i * 4);
+++++ ++++ pr_cont(" %08x",
+++++ ++++ be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
if ((i + 1) % 8 == 0)
----- ---- printk("\n");
+++++ ++++ pr_cont("\n");
}
----- ---- printk("\n");
+++++ ++++ pr_err("\n");
}
/*