2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/file.h>
38 #include <linux/slab.h>
39 #include <linux/sched.h>
41 #include <linux/uaccess.h>
43 #include <rdma/uverbs_types.h>
44 #include <rdma/uverbs_std_types.h>
45 #include "rdma_core.h"
48 #include "core_priv.h"
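/*
 * Each legacy uverbs command arrives through write() as a struct
 * ib_uverbs_cmd_hdr followed by the fixed command struct; the response
 * is written back with copy_to_user() at the user-supplied cmd.response
 * address.  The INIT_UDATA() calls below hand whatever trails the fixed
 * structs to the driver as ib_udata.
 */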
50 static struct ib_uverbs_completion_event_file *
51 ib_uverbs_lookup_comp_file(int fd, struct ib_ucontext *context)
53 struct ib_uobject *uobj = uobj_get_read(uobj_get_type(comp_channel),
55 struct ib_uobject_file *uobj_file;
60 uverbs_uobject_get(uobj);
63 uobj_file = container_of(uobj, struct ib_uobject_file, uobj);
64 return container_of(uobj_file, struct ib_uverbs_completion_event_file,
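/*
 * GET_CONTEXT: allocate the per-process ucontext through the driver's
 * alloc_ucontext hook, charge it to the rdma cgroup, create the async
 * event fd, and report async_fd plus num_comp_vectors back to user
 * space.  file->mutex serializes this so a file ends up with at most
 * one ucontext.
 */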
68 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
69 struct ib_device *ib_dev,
70 const char __user *buf,
71 int in_len, int out_len)
73 struct ib_uverbs_get_context cmd;
74 struct ib_uverbs_get_context_resp resp;
75 struct ib_udata udata;
76 struct ib_ucontext *ucontext;
78 struct ib_rdmacg_object cg_obj;
81 if (out_len < sizeof resp)
84 if (copy_from_user(&cmd, buf, sizeof cmd))
87 mutex_lock(&file->mutex);
94 INIT_UDATA(&udata, buf + sizeof(cmd),
95 (unsigned long) cmd.response + sizeof(resp),
96 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
97 out_len - sizeof(resp));
99 ret = ib_rdmacg_try_charge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
103 ucontext = ib_dev->alloc_ucontext(ib_dev, &udata);
104 if (IS_ERR(ucontext)) {
105 ret = PTR_ERR(ucontext);
109 ucontext->device = ib_dev;
110 ucontext->cg_obj = cg_obj;
111 /* ufile is required when some objects are released */
112 ucontext->ufile = file;
113 uverbs_initialize_ucontext(ucontext);
116 ucontext->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
118 ucontext->closing = 0;
120 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
121 ucontext->umem_tree = RB_ROOT_CACHED;
122 init_rwsem(&ucontext->umem_rwsem);
123 ucontext->odp_mrs_count = 0;
124 INIT_LIST_HEAD(&ucontext->no_private_counters);
126 if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_ON_DEMAND_PAGING))
127 ucontext->invalidate_range = NULL;
131 resp.num_comp_vectors = file->device->num_comp_vectors;
133 ret = get_unused_fd_flags(O_CLOEXEC);
138 filp = ib_uverbs_alloc_async_event_file(file, ib_dev);
144 if (copy_to_user((void __user *) (unsigned long) cmd.response,
145 &resp, sizeof resp)) {
150 file->ucontext = ucontext;
152 fd_install(resp.async_fd, filp);
154 mutex_unlock(&file->mutex);
159 ib_uverbs_free_async_event_file(file);
163 put_unused_fd(resp.async_fd);
166 put_pid(ucontext->tgid);
167 ib_dev->dealloc_ucontext(ucontext);
170 ib_rdmacg_uncharge(&cg_obj, ib_dev, RDMACG_RESOURCE_HCA_HANDLE);
173 mutex_unlock(&file->mutex);
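/*
 * Flatten struct ib_device_attr into the legacy query_device response.
 * Only the lower 32 bits of device_cap_flags fit this ABI; the extended
 * query_device command exists to report the rest.
 */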
177 static void copy_query_dev_fields(struct ib_uverbs_file *file,
178 struct ib_device *ib_dev,
179 struct ib_uverbs_query_device_resp *resp,
180 struct ib_device_attr *attr)
182 resp->fw_ver = attr->fw_ver;
183 resp->node_guid = ib_dev->node_guid;
184 resp->sys_image_guid = attr->sys_image_guid;
185 resp->max_mr_size = attr->max_mr_size;
186 resp->page_size_cap = attr->page_size_cap;
187 resp->vendor_id = attr->vendor_id;
188 resp->vendor_part_id = attr->vendor_part_id;
189 resp->hw_ver = attr->hw_ver;
190 resp->max_qp = attr->max_qp;
191 resp->max_qp_wr = attr->max_qp_wr;
192 resp->device_cap_flags = lower_32_bits(attr->device_cap_flags);
193 resp->max_sge = attr->max_sge;
194 resp->max_sge_rd = attr->max_sge_rd;
195 resp->max_cq = attr->max_cq;
196 resp->max_cqe = attr->max_cqe;
197 resp->max_mr = attr->max_mr;
198 resp->max_pd = attr->max_pd;
199 resp->max_qp_rd_atom = attr->max_qp_rd_atom;
200 resp->max_ee_rd_atom = attr->max_ee_rd_atom;
201 resp->max_res_rd_atom = attr->max_res_rd_atom;
202 resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom;
203 resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom;
204 resp->atomic_cap = attr->atomic_cap;
205 resp->max_ee = attr->max_ee;
206 resp->max_rdd = attr->max_rdd;
207 resp->max_mw = attr->max_mw;
208 resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp;
209 resp->max_raw_ethy_qp = attr->max_raw_ethy_qp;
210 resp->max_mcast_grp = attr->max_mcast_grp;
211 resp->max_mcast_qp_attach = attr->max_mcast_qp_attach;
212 resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
213 resp->max_ah = attr->max_ah;
214 resp->max_fmr = attr->max_fmr;
215 resp->max_map_per_fmr = attr->max_map_per_fmr;
216 resp->max_srq = attr->max_srq;
217 resp->max_srq_wr = attr->max_srq_wr;
218 resp->max_srq_sge = attr->max_srq_sge;
219 resp->max_pkeys = attr->max_pkeys;
220 resp->local_ca_ack_delay = attr->local_ca_ack_delay;
221 resp->phys_port_cnt = ib_dev->phys_port_cnt;
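/* QUERY_DEVICE: report the attributes cached at ib_dev->attrs. */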
224 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
225 struct ib_device *ib_dev,
226 const char __user *buf,
227 int in_len, int out_len)
229 struct ib_uverbs_query_device cmd;
230 struct ib_uverbs_query_device_resp resp;
232 if (out_len < sizeof resp)
235 if (copy_from_user(&cmd, buf, sizeof cmd))
238 memset(&resp, 0, sizeof resp);
239 copy_query_dev_fields(file, ib_dev, &resp, &ib_dev->attrs);
241 if (copy_to_user((void __user *) (unsigned long) cmd.response,
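/*
 * QUERY_PORT: query the live port attributes.  LIDs are returned as
 * 16-bit IB LIDs; on OPA-capable ports the 32-bit LIDs are first
 * converted with OPA_TO_IB_UCAST_LID().
 */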
248 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
249 struct ib_device *ib_dev,
250 const char __user *buf,
251 int in_len, int out_len)
253 struct ib_uverbs_query_port cmd;
254 struct ib_uverbs_query_port_resp resp;
255 struct ib_port_attr attr;
258 if (out_len < sizeof resp)
261 if (copy_from_user(&cmd, buf, sizeof cmd))
264 ret = ib_query_port(ib_dev, cmd.port_num, &attr);
268 memset(&resp, 0, sizeof resp);
270 resp.state = attr.state;
271 resp.max_mtu = attr.max_mtu;
272 resp.active_mtu = attr.active_mtu;
273 resp.gid_tbl_len = attr.gid_tbl_len;
274 resp.port_cap_flags = attr.port_cap_flags;
275 resp.max_msg_sz = attr.max_msg_sz;
276 resp.bad_pkey_cntr = attr.bad_pkey_cntr;
277 resp.qkey_viol_cntr = attr.qkey_viol_cntr;
278 resp.pkey_tbl_len = attr.pkey_tbl_len;
280 if (rdma_cap_opa_ah(ib_dev, cmd.port_num)) {
281 resp.lid = OPA_TO_IB_UCAST_LID(attr.lid);
282 resp.sm_lid = OPA_TO_IB_UCAST_LID(attr.sm_lid);
284 resp.lid = ib_lid_cpu16(attr.lid);
285 resp.sm_lid = ib_lid_cpu16(attr.sm_lid);
288 resp.max_vl_num = attr.max_vl_num;
289 resp.sm_sl = attr.sm_sl;
290 resp.subnet_timeout = attr.subnet_timeout;
291 resp.init_type_reply = attr.init_type_reply;
292 resp.active_width = attr.active_width;
293 resp.active_speed = attr.active_speed;
294 resp.phys_state = attr.phys_state;
295 resp.link_layer = rdma_port_get_link_layer(ib_dev,
298 if (copy_to_user((void __user *) (unsigned long) cmd.response,
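/*
 * ALLOC_PD: create a protection domain via the driver's alloc_pd hook
 * and wrap it in a new uobject.  uobj_alloc_commit() publishes the
 * handle only after the response has reached user space; any earlier
 * failure aborts the allocation instead.
 */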
305 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
306 struct ib_device *ib_dev,
307 const char __user *buf,
308 int in_len, int out_len)
310 struct ib_uverbs_alloc_pd cmd;
311 struct ib_uverbs_alloc_pd_resp resp;
312 struct ib_udata udata;
313 struct ib_uobject *uobj;
317 if (out_len < sizeof resp)
320 if (copy_from_user(&cmd, buf, sizeof cmd))
323 INIT_UDATA(&udata, buf + sizeof(cmd),
324 (unsigned long) cmd.response + sizeof(resp),
325 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
326 out_len - sizeof(resp));
328 uobj = uobj_alloc(uobj_get_type(pd), file->ucontext);
330 return PTR_ERR(uobj);
332 pd = ib_dev->alloc_pd(ib_dev, file->ucontext, &udata);
340 pd->__internal_mr = NULL;
341 atomic_set(&pd->usecnt, 0);
344 memset(&resp, 0, sizeof resp);
345 resp.pd_handle = uobj->id;
347 if (copy_to_user((void __user *) (unsigned long) cmd.response,
348 &resp, sizeof resp)) {
353 uobj_alloc_commit(uobj);
361 uobj_alloc_abort(uobj);
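/*
 * DEALLOC_PD: teardown goes through uobj_remove_commit(), which is
 * expected to refuse the destroy while other objects still hold a
 * reference to the PD.
 */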
365 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
366 struct ib_device *ib_dev,
367 const char __user *buf,
368 int in_len, int out_len)
370 struct ib_uverbs_dealloc_pd cmd;
371 struct ib_uobject *uobj;
374 if (copy_from_user(&cmd, buf, sizeof cmd))
377 uobj = uobj_get_write(uobj_get_type(pd), cmd.pd_handle,
380 return PTR_ERR(uobj);
382 ret = uobj_remove_commit(uobj);
384 return ret ?: in_len;
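/*
 * XRC domains are shared between processes via the inode of an opened
 * file: each uverbs device keeps an rb-tree (xrcd_tree, protected by
 * xrcd_tree_mutex) mapping that inode to the ib_xrcd, so every open of
 * the same file resolves to the same XRCD.  The helpers below maintain
 * the tree.
 */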
387 struct xrcd_table_entry {
389 struct ib_xrcd *xrcd;
393 static int xrcd_table_insert(struct ib_uverbs_device *dev,
395 struct ib_xrcd *xrcd)
397 struct xrcd_table_entry *entry, *scan;
398 struct rb_node **p = &dev->xrcd_tree.rb_node;
399 struct rb_node *parent = NULL;
401 entry = kmalloc(sizeof *entry, GFP_KERNEL);
406 entry->inode = inode;
410 scan = rb_entry(parent, struct xrcd_table_entry, node);
412 if (inode < scan->inode) {
414 } else if (inode > scan->inode) {
422 rb_link_node(&entry->node, parent, p);
423 rb_insert_color(&entry->node, &dev->xrcd_tree);
428 static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
431 struct xrcd_table_entry *entry;
432 struct rb_node *p = dev->xrcd_tree.rb_node;
435 entry = rb_entry(p, struct xrcd_table_entry, node);
437 if (inode < entry->inode)
439 else if (inode > entry->inode)
448 static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
450 struct xrcd_table_entry *entry;
452 entry = xrcd_table_search(dev, inode);
459 static void xrcd_table_delete(struct ib_uverbs_device *dev,
462 struct xrcd_table_entry *entry;
464 entry = xrcd_table_search(dev, inode);
467 rb_erase(&entry->node, &dev->xrcd_tree);
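/*
 * OPEN_XRCD: resolve cmd.fd to an inode and reuse the XRCD already
 * registered for it; with O_CREAT a missing entry is allocated through
 * the driver and inserted into the table, while O_EXCL on an existing
 * entry is rejected.
 */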
472 ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
473 struct ib_device *ib_dev,
474 const char __user *buf, int in_len,
477 struct ib_uverbs_open_xrcd cmd;
478 struct ib_uverbs_open_xrcd_resp resp;
479 struct ib_udata udata;
480 struct ib_uxrcd_object *obj;
481 struct ib_xrcd *xrcd = NULL;
482 struct fd f = {NULL, 0};
483 struct inode *inode = NULL;
487 if (out_len < sizeof resp)
490 if (copy_from_user(&cmd, buf, sizeof cmd))
493 INIT_UDATA(&udata, buf + sizeof(cmd),
494 (unsigned long) cmd.response + sizeof(resp),
495 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
496 out_len - sizeof(resp));
498 mutex_lock(&file->device->xrcd_tree_mutex);
501 /* search for file descriptor */
505 goto err_tree_mutex_unlock;
508 inode = file_inode(f.file);
509 xrcd = find_xrcd(file->device, inode);
510 if (!xrcd && !(cmd.oflags & O_CREAT)) {
511 /* no XRCD for this inode yet, and O_CREAT was not requested */
513 goto err_tree_mutex_unlock;
516 if (xrcd && cmd.oflags & O_EXCL) {
518 goto err_tree_mutex_unlock;
522 obj = (struct ib_uxrcd_object *)uobj_alloc(uobj_get_type(xrcd),
526 goto err_tree_mutex_unlock;
530 xrcd = ib_dev->alloc_xrcd(ib_dev, file->ucontext, &udata);
537 xrcd->device = ib_dev;
538 atomic_set(&xrcd->usecnt, 0);
539 mutex_init(&xrcd->tgt_qp_mutex);
540 INIT_LIST_HEAD(&xrcd->tgt_qp_list);
544 atomic_set(&obj->refcnt, 0);
545 obj->uobject.object = xrcd;
546 memset(&resp, 0, sizeof resp);
547 resp.xrcd_handle = obj->uobject.id;
551 /* create new inode/xrcd table entry */
552 ret = xrcd_table_insert(file->device, inode, xrcd);
554 goto err_dealloc_xrcd;
556 atomic_inc(&xrcd->usecnt);
559 if (copy_to_user((void __user *) (unsigned long) cmd.response,
560 &resp, sizeof resp)) {
568 uobj_alloc_commit(&obj->uobject);
570 mutex_unlock(&file->device->xrcd_tree_mutex);
576 xrcd_table_delete(file->device, inode);
577 atomic_dec(&xrcd->usecnt);
581 ib_dealloc_xrcd(xrcd);
584 uobj_alloc_abort(&obj->uobject);
586 err_tree_mutex_unlock:
590 mutex_unlock(&file->device->xrcd_tree_mutex);
595 ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
596 struct ib_device *ib_dev,
597 const char __user *buf, int in_len,
600 struct ib_uverbs_close_xrcd cmd;
601 struct ib_uobject *uobj;
604 if (copy_from_user(&cmd, buf, sizeof cmd))
607 uobj = uobj_get_write(uobj_get_type(xrcd), cmd.xrcd_handle,
610 mutex_unlock(&file->device->xrcd_tree_mutex);
611 return PTR_ERR(uobj);
614 ret = uobj_remove_commit(uobj);
615 return ret ?: in_len;
618 int ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
619 struct ib_xrcd *xrcd,
620 enum rdma_remove_reason why)
626 if (inode && !atomic_dec_and_test(&xrcd->usecnt))
629 ret = ib_dealloc_xrcd(xrcd);
631 if (why == RDMA_REMOVE_DESTROY && ret)
632 atomic_inc(&xrcd->usecnt);
634 xrcd_table_delete(dev, inode);
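/*
 * REG_MR: register a user memory region.  cmd.start and cmd.hca_va must
 * share the same offset within a page, and IB_ACCESS_ON_DEMAND is only
 * accepted when the device advertises IB_DEVICE_ON_DEMAND_PAGING.
 */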
639 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
640 struct ib_device *ib_dev,
641 const char __user *buf, int in_len,
644 struct ib_uverbs_reg_mr cmd;
645 struct ib_uverbs_reg_mr_resp resp;
646 struct ib_udata udata;
647 struct ib_uobject *uobj;
652 if (out_len < sizeof resp)
655 if (copy_from_user(&cmd, buf, sizeof cmd))
658 INIT_UDATA(&udata, buf + sizeof(cmd),
659 (unsigned long) cmd.response + sizeof(resp),
660 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
661 out_len - sizeof(resp));
663 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
666 ret = ib_check_mr_access(cmd.access_flags);
670 uobj = uobj_alloc(uobj_get_type(mr), file->ucontext);
672 return PTR_ERR(uobj);
674 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
680 if (cmd.access_flags & IB_ACCESS_ON_DEMAND) {
681 if (!(pd->device->attrs.device_cap_flags &
682 IB_DEVICE_ON_DEMAND_PAGING)) {
683 pr_debug("ODP support not available\n");
689 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
690 cmd.access_flags, &udata);
696 mr->device = pd->device;
699 atomic_inc(&pd->usecnt);
703 memset(&resp, 0, sizeof resp);
704 resp.lkey = mr->lkey;
705 resp.rkey = mr->rkey;
706 resp.mr_handle = uobj->id;
708 if (copy_to_user((void __user *) (unsigned long) cmd.response,
709 &resp, sizeof resp)) {
714 uobj_put_obj_read(pd);
716 uobj_alloc_commit(uobj);
724 uobj_put_obj_read(pd);
727 uobj_alloc_abort(uobj);
731 ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
732 struct ib_device *ib_dev,
733 const char __user *buf, int in_len,
736 struct ib_uverbs_rereg_mr cmd;
737 struct ib_uverbs_rereg_mr_resp resp;
738 struct ib_udata udata;
739 struct ib_pd *pd = NULL;
741 struct ib_pd *old_pd;
743 struct ib_uobject *uobj;
745 if (out_len < sizeof(resp))
748 if (copy_from_user(&cmd, buf, sizeof(cmd)))
751 INIT_UDATA(&udata, buf + sizeof(cmd),
752 (unsigned long) cmd.response + sizeof(resp),
753 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
754 out_len - sizeof(resp));
756 if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
759 if ((cmd.flags & IB_MR_REREG_TRANS) &&
760 (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
761 (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
764 uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
767 return PTR_ERR(uobj);
771 if (cmd.flags & IB_MR_REREG_ACCESS) {
772 ret = ib_check_mr_access(cmd.access_flags);
777 if (cmd.flags & IB_MR_REREG_PD) {
778 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
786 ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
787 cmd.length, cmd.hca_va,
788 cmd.access_flags, pd, &udata);
790 if (cmd.flags & IB_MR_REREG_PD) {
791 atomic_inc(&pd->usecnt);
793 atomic_dec(&old_pd->usecnt);
799 memset(&resp, 0, sizeof(resp));
800 resp.lkey = mr->lkey;
801 resp.rkey = mr->rkey;
803 if (copy_to_user((void __user *)(unsigned long)cmd.response,
804 &resp, sizeof(resp)))
810 if (cmd.flags & IB_MR_REREG_PD)
811 uobj_put_obj_read(pd);
814 uobj_put_write(uobj);
819 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
820 struct ib_device *ib_dev,
821 const char __user *buf, int in_len,
824 struct ib_uverbs_dereg_mr cmd;
825 struct ib_uobject *uobj;
828 if (copy_from_user(&cmd, buf, sizeof cmd))
831 uobj = uobj_get_write(uobj_get_type(mr), cmd.mr_handle,
834 return PTR_ERR(uobj);
836 ret = uobj_remove_commit(uobj);
838 return ret ?: in_len;
841 ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
842 struct ib_device *ib_dev,
843 const char __user *buf, int in_len,
846 struct ib_uverbs_alloc_mw cmd;
847 struct ib_uverbs_alloc_mw_resp resp;
848 struct ib_uobject *uobj;
851 struct ib_udata udata;
854 if (out_len < sizeof(resp))
857 if (copy_from_user(&cmd, buf, sizeof(cmd)))
860 uobj = uobj_alloc(uobj_get_type(mw), file->ucontext);
862 return PTR_ERR(uobj);
864 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
870 INIT_UDATA(&udata, buf + sizeof(cmd),
871 (unsigned long)cmd.response + sizeof(resp),
872 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
873 out_len - sizeof(resp));
875 mw = pd->device->alloc_mw(pd, cmd.mw_type, &udata);
881 mw->device = pd->device;
884 atomic_inc(&pd->usecnt);
888 memset(&resp, 0, sizeof(resp));
889 resp.rkey = mw->rkey;
890 resp.mw_handle = uobj->id;
892 if (copy_to_user((void __user *)(unsigned long)cmd.response,
893 &resp, sizeof(resp))) {
898 uobj_put_obj_read(pd);
899 uobj_alloc_commit(uobj);
904 uverbs_dealloc_mw(mw);
906 uobj_put_obj_read(pd);
908 uobj_alloc_abort(uobj);
912 ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
913 struct ib_device *ib_dev,
914 const char __user *buf, int in_len,
917 struct ib_uverbs_dealloc_mw cmd;
918 struct ib_uobject *uobj;
921 if (copy_from_user(&cmd, buf, sizeof(cmd)))
924 uobj = uobj_get_write(uobj_get_type(mw), cmd.mw_handle,
927 return PTR_ERR(uobj);
929 ret = uobj_remove_commit(uobj);
930 return ret ?: in_len;
933 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
934 struct ib_device *ib_dev,
935 const char __user *buf, int in_len,
938 struct ib_uverbs_create_comp_channel cmd;
939 struct ib_uverbs_create_comp_channel_resp resp;
940 struct ib_uobject *uobj;
941 struct ib_uverbs_completion_event_file *ev_file;
943 if (out_len < sizeof resp)
946 if (copy_from_user(&cmd, buf, sizeof cmd))
949 uobj = uobj_alloc(uobj_get_type(comp_channel), file->ucontext);
951 return PTR_ERR(uobj);
955 ev_file = container_of(uobj, struct ib_uverbs_completion_event_file,
957 ib_uverbs_init_event_queue(&ev_file->ev_queue);
959 if (copy_to_user((void __user *) (unsigned long) cmd.response,
960 &resp, sizeof resp)) {
961 uobj_alloc_abort(uobj);
965 uobj_alloc_commit(uobj);
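/*
 * create_cq() backs both the legacy CREATE_CQ write() path and the
 * extended EX_CREATE_CQ path: cmd_sz says how much of the extended
 * command the caller supplied, and the cb callback copies whichever
 * response layout that caller expects.
 */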
969 static struct ib_ucq_object *create_cq(struct ib_uverbs_file *file,
970 struct ib_device *ib_dev,
971 struct ib_udata *ucore,
972 struct ib_udata *uhw,
973 struct ib_uverbs_ex_create_cq *cmd,
975 int (*cb)(struct ib_uverbs_file *file,
976 struct ib_ucq_object *obj,
977 struct ib_uverbs_ex_create_cq_resp *resp,
978 struct ib_udata *udata,
982 struct ib_ucq_object *obj;
983 struct ib_uverbs_completion_event_file *ev_file = NULL;
986 struct ib_uverbs_ex_create_cq_resp resp;
987 struct ib_cq_init_attr attr = {};
989 if (cmd->comp_vector >= file->device->num_comp_vectors)
990 return ERR_PTR(-EINVAL);
992 obj = (struct ib_ucq_object *)uobj_alloc(uobj_get_type(cq),
997 if (cmd->comp_channel >= 0) {
998 ev_file = ib_uverbs_lookup_comp_file(cmd->comp_channel,
1000 if (IS_ERR(ev_file)) {
1001 ret = PTR_ERR(ev_file);
1006 obj->uobject.user_handle = cmd->user_handle;
1007 obj->uverbs_file = file;
1008 obj->comp_events_reported = 0;
1009 obj->async_events_reported = 0;
1010 INIT_LIST_HEAD(&obj->comp_list);
1011 INIT_LIST_HEAD(&obj->async_list);
1013 attr.cqe = cmd->cqe;
1014 attr.comp_vector = cmd->comp_vector;
1016 if (cmd_sz > offsetof(typeof(*cmd), flags) + sizeof(cmd->flags))
1017 attr.flags = cmd->flags;
1019 cq = ib_dev->create_cq(ib_dev, &attr, file->ucontext, uhw);
1025 cq->device = ib_dev;
1026 cq->uobject = &obj->uobject;
1027 cq->comp_handler = ib_uverbs_comp_handler;
1028 cq->event_handler = ib_uverbs_cq_event_handler;
1029 cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
1030 atomic_set(&cq->usecnt, 0);
1032 obj->uobject.object = cq;
1033 memset(&resp, 0, sizeof resp);
1034 resp.base.cq_handle = obj->uobject.id;
1035 resp.base.cqe = cq->cqe;
1037 resp.response_length = offsetof(typeof(resp), response_length) +
1038 sizeof(resp.response_length);
1040 ret = cb(file, obj, &resp, ucore, context);
1044 uobj_alloc_commit(&obj->uobject);
1053 ib_uverbs_release_ucq(file, ev_file, obj);
1056 uobj_alloc_abort(&obj->uobject);
1058 return ERR_PTR(ret);
1061 static int ib_uverbs_create_cq_cb(struct ib_uverbs_file *file,
1062 struct ib_ucq_object *obj,
1063 struct ib_uverbs_ex_create_cq_resp *resp,
1064 struct ib_udata *ucore, void *context)
1066 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1072 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
1073 struct ib_device *ib_dev,
1074 const char __user *buf, int in_len,
1077 struct ib_uverbs_create_cq cmd;
1078 struct ib_uverbs_ex_create_cq cmd_ex;
1079 struct ib_uverbs_create_cq_resp resp;
1080 struct ib_udata ucore;
1081 struct ib_udata uhw;
1082 struct ib_ucq_object *obj;
1084 if (out_len < sizeof(resp))
1087 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1090 INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd), sizeof(resp));
1092 INIT_UDATA(&uhw, buf + sizeof(cmd),
1093 (unsigned long)cmd.response + sizeof(resp),
1094 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
1095 out_len - sizeof(resp));
1097 memset(&cmd_ex, 0, sizeof(cmd_ex));
1098 cmd_ex.user_handle = cmd.user_handle;
1099 cmd_ex.cqe = cmd.cqe;
1100 cmd_ex.comp_vector = cmd.comp_vector;
1101 cmd_ex.comp_channel = cmd.comp_channel;
1103 obj = create_cq(file, ib_dev, &ucore, &uhw, &cmd_ex,
1104 offsetof(typeof(cmd_ex), comp_channel) +
1105 sizeof(cmd.comp_channel), ib_uverbs_create_cq_cb,
1109 return PTR_ERR(obj);
1114 static int ib_uverbs_ex_create_cq_cb(struct ib_uverbs_file *file,
1115 struct ib_ucq_object *obj,
1116 struct ib_uverbs_ex_create_cq_resp *resp,
1117 struct ib_udata *ucore, void *context)
1119 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1125 int ib_uverbs_ex_create_cq(struct ib_uverbs_file *file,
1126 struct ib_device *ib_dev,
1127 struct ib_udata *ucore,
1128 struct ib_udata *uhw)
1130 struct ib_uverbs_ex_create_cq_resp resp;
1131 struct ib_uverbs_ex_create_cq cmd;
1132 struct ib_ucq_object *obj;
1135 if (ucore->inlen < sizeof(cmd))
1138 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
1148 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
1149 sizeof(resp.response_length)))
1152 obj = create_cq(file, ib_dev, ucore, uhw, &cmd,
1153 min(ucore->inlen, sizeof(cmd)),
1154 ib_uverbs_ex_create_cq_cb, NULL);
1157 return PTR_ERR(obj);
1162 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
1163 struct ib_device *ib_dev,
1164 const char __user *buf, int in_len,
1167 struct ib_uverbs_resize_cq cmd;
1168 struct ib_uverbs_resize_cq_resp resp = {};
1169 struct ib_udata udata;
1173 if (copy_from_user(&cmd, buf, sizeof cmd))
1176 INIT_UDATA(&udata, buf + sizeof(cmd),
1177 (unsigned long) cmd.response + sizeof(resp),
1178 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
1179 out_len - sizeof(resp));
1181 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
1185 ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
1191 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1192 &resp, sizeof resp.cqe))
1196 uobj_put_obj_read(cq);
1198 return ret ? ret : in_len;
1201 static int copy_wc_to_user(struct ib_device *ib_dev, void __user *dest,
1204 struct ib_uverbs_wc tmp;
1206 tmp.wr_id = wc->wr_id;
1207 tmp.status = wc->status;
1208 tmp.opcode = wc->opcode;
1209 tmp.vendor_err = wc->vendor_err;
1210 tmp.byte_len = wc->byte_len;
1211 tmp.ex.imm_data = (__u32 __force) wc->ex.imm_data;
1212 tmp.qp_num = wc->qp->qp_num;
1213 tmp.src_qp = wc->src_qp;
1214 tmp.wc_flags = wc->wc_flags;
1215 tmp.pkey_index = wc->pkey_index;
1216 if (rdma_cap_opa_ah(ib_dev, wc->port_num))
1217 tmp.slid = OPA_TO_IB_UCAST_LID(wc->slid);
1219 tmp.slid = ib_lid_cpu16(wc->slid);
1221 tmp.dlid_path_bits = wc->dlid_path_bits;
1222 tmp.port_num = wc->port_num;
1225 if (copy_to_user(dest, &tmp, sizeof tmp))
1231 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
1232 struct ib_device *ib_dev,
1233 const char __user *buf, int in_len,
1236 struct ib_uverbs_poll_cq cmd;
1237 struct ib_uverbs_poll_cq_resp resp;
1238 u8 __user *header_ptr;
1239 u8 __user *data_ptr;
1244 if (copy_from_user(&cmd, buf, sizeof cmd))
1247 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
1251 /* we copy a struct ib_uverbs_poll_cq_resp to user space */
1252 header_ptr = (void __user *)(unsigned long) cmd.response;
1253 data_ptr = header_ptr + sizeof resp;
1255 memset(&resp, 0, sizeof resp);
1256 while (resp.count < cmd.ne) {
1257 ret = ib_poll_cq(cq, 1, &wc);
1263 ret = copy_wc_to_user(ib_dev, data_ptr, &wc);
1267 data_ptr += sizeof(struct ib_uverbs_wc);
1271 if (copy_to_user(header_ptr, &resp, sizeof resp)) {
1279 uobj_put_obj_read(cq);
1283 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
1284 struct ib_device *ib_dev,
1285 const char __user *buf, int in_len,
1288 struct ib_uverbs_req_notify_cq cmd;
1291 if (copy_from_user(&cmd, buf, sizeof cmd))
1294 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
1298 ib_req_notify_cq(cq, cmd.solicited_only ?
1299 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
1301 uobj_put_obj_read(cq);
1306 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
1307 struct ib_device *ib_dev,
1308 const char __user *buf, int in_len,
1311 struct ib_uverbs_destroy_cq cmd;
1312 struct ib_uverbs_destroy_cq_resp resp;
1313 struct ib_uobject *uobj;
1315 struct ib_ucq_object *obj;
1318 if (copy_from_user(&cmd, buf, sizeof cmd))
1321 uobj = uobj_get_write(uobj_get_type(cq), cmd.cq_handle,
1324 return PTR_ERR(uobj);
1327 * Make sure we don't free the memory in remove_commit as we still
1328 * need the uobject memory to create the response.
1330 uverbs_uobject_get(uobj);
1332 obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
1334 memset(&resp, 0, sizeof(resp));
1336 ret = uobj_remove_commit(uobj);
1338 uverbs_uobject_put(uobj);
1342 resp.comp_events_reported = obj->comp_events_reported;
1343 resp.async_events_reported = obj->async_events_reported;
1345 uverbs_uobject_put(uobj);
1346 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1347 &resp, sizeof resp))
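/*
 * create_qp() backs both CREATE_QP and EX_CREATE_QP.  Depending on the
 * QP type it resolves a PD, send/receive CQs, an SRQ, an XRCD or an RWQ
 * indirection table, takes the matching usecnt references, and commits
 * the uobject only once the response callback has succeeded.  Raw
 * packet QPs and an explicit source QPN both require CAP_NET_RAW.
 */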
1353 static int create_qp(struct ib_uverbs_file *file,
1354 struct ib_udata *ucore,
1355 struct ib_udata *uhw,
1356 struct ib_uverbs_ex_create_qp *cmd,
1358 int (*cb)(struct ib_uverbs_file *file,
1359 struct ib_uverbs_ex_create_qp_resp *resp,
1360 struct ib_udata *udata),
1363 struct ib_uqp_object *obj;
1364 struct ib_device *device;
1365 struct ib_pd *pd = NULL;
1366 struct ib_xrcd *xrcd = NULL;
1367 struct ib_uobject *xrcd_uobj = ERR_PTR(-ENOENT);
1368 struct ib_cq *scq = NULL, *rcq = NULL;
1369 struct ib_srq *srq = NULL;
1372 struct ib_qp_init_attr attr = {};
1373 struct ib_uverbs_ex_create_qp_resp resp;
1375 struct ib_rwq_ind_table *ind_tbl = NULL;
1378 if (cmd->qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
1381 obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
1384 return PTR_ERR(obj);
1386 obj->uevent.uobject.user_handle = cmd->user_handle;
1387 mutex_init(&obj->mcast_lock);
1389 if (cmd_sz >= offsetof(typeof(*cmd), rwq_ind_tbl_handle) +
1390 sizeof(cmd->rwq_ind_tbl_handle) &&
1391 (cmd->comp_mask & IB_UVERBS_CREATE_QP_MASK_IND_TABLE)) {
1392 ind_tbl = uobj_get_obj_read(rwq_ind_table,
1393 cmd->rwq_ind_tbl_handle,
1400 attr.rwq_ind_tbl = ind_tbl;
1403 if (cmd_sz > sizeof(*cmd) &&
1404 !ib_is_udata_cleared(ucore, sizeof(*cmd),
1405 cmd_sz - sizeof(*cmd))) {
1410 if (ind_tbl && (cmd->max_recv_wr || cmd->max_recv_sge || cmd->is_srq)) {
1415 if (ind_tbl && !cmd->max_send_wr)
1418 if (cmd->qp_type == IB_QPT_XRC_TGT) {
1419 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->pd_handle,
1422 if (IS_ERR(xrcd_uobj)) {
1427 xrcd = (struct ib_xrcd *)xrcd_uobj->object;
1432 device = xrcd->device;
1434 if (cmd->qp_type == IB_QPT_XRC_INI) {
1435 cmd->max_recv_wr = 0;
1436 cmd->max_recv_sge = 0;
1439 srq = uobj_get_obj_read(srq, cmd->srq_handle,
1441 if (!srq || srq->srq_type == IB_SRQT_XRC) {
1448 if (cmd->recv_cq_handle != cmd->send_cq_handle) {
1449 rcq = uobj_get_obj_read(cq, cmd->recv_cq_handle,
1460 scq = uobj_get_obj_read(cq, cmd->send_cq_handle,
1464 pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
1465 if (!pd || (!scq && has_sq)) {
1470 device = pd->device;
1473 attr.event_handler = ib_uverbs_qp_event_handler;
1474 attr.qp_context = file;
1479 attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR :
1481 attr.qp_type = cmd->qp_type;
1482 attr.create_flags = 0;
1484 attr.cap.max_send_wr = cmd->max_send_wr;
1485 attr.cap.max_recv_wr = cmd->max_recv_wr;
1486 attr.cap.max_send_sge = cmd->max_send_sge;
1487 attr.cap.max_recv_sge = cmd->max_recv_sge;
1488 attr.cap.max_inline_data = cmd->max_inline_data;
1490 obj->uevent.events_reported = 0;
1491 INIT_LIST_HEAD(&obj->uevent.event_list);
1492 INIT_LIST_HEAD(&obj->mcast_list);
1494 if (cmd_sz >= offsetof(typeof(*cmd), create_flags) +
1495 sizeof(cmd->create_flags))
1496 attr.create_flags = cmd->create_flags;
1498 if (attr.create_flags & ~(IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
1499 IB_QP_CREATE_CROSS_CHANNEL |
1500 IB_QP_CREATE_MANAGED_SEND |
1501 IB_QP_CREATE_MANAGED_RECV |
1502 IB_QP_CREATE_SCATTER_FCS |
1503 IB_QP_CREATE_CVLAN_STRIPPING |
1504 IB_QP_CREATE_SOURCE_QPN)) {
1509 if (attr.create_flags & IB_QP_CREATE_SOURCE_QPN) {
1510 if (!capable(CAP_NET_RAW)) {
1515 attr.source_qpn = cmd->source_qpn;
1518 buf = (void *)cmd + sizeof(*cmd);
1519 if (cmd_sz > sizeof(*cmd))
1520 if (!(buf[0] == 0 && !memcmp(buf, buf + 1,
1521 cmd_sz - sizeof(*cmd) - 1))) {
1526 if (cmd->qp_type == IB_QPT_XRC_TGT)
1527 qp = ib_create_qp(pd, &attr);
1529 qp = device->create_qp(pd, &attr, uhw);
1536 if (cmd->qp_type != IB_QPT_XRC_TGT) {
1537 ret = ib_create_qp_security(qp, device);
1542 qp->device = device;
1544 qp->send_cq = attr.send_cq;
1545 qp->recv_cq = attr.recv_cq;
1547 qp->rwq_ind_tbl = ind_tbl;
1548 qp->event_handler = attr.event_handler;
1549 qp->qp_context = attr.qp_context;
1550 qp->qp_type = attr.qp_type;
1551 atomic_set(&qp->usecnt, 0);
1552 atomic_inc(&pd->usecnt);
1555 atomic_inc(&attr.send_cq->usecnt);
1557 atomic_inc(&attr.recv_cq->usecnt);
1559 atomic_inc(&attr.srq->usecnt);
1561 atomic_inc(&ind_tbl->usecnt);
1563 qp->uobject = &obj->uevent.uobject;
1565 obj->uevent.uobject.object = qp;
1567 memset(&resp, 0, sizeof resp);
1568 resp.base.qpn = qp->qp_num;
1569 resp.base.qp_handle = obj->uevent.uobject.id;
1570 resp.base.max_recv_sge = attr.cap.max_recv_sge;
1571 resp.base.max_send_sge = attr.cap.max_send_sge;
1572 resp.base.max_recv_wr = attr.cap.max_recv_wr;
1573 resp.base.max_send_wr = attr.cap.max_send_wr;
1574 resp.base.max_inline_data = attr.cap.max_inline_data;
1576 resp.response_length = offsetof(typeof(resp), response_length) +
1577 sizeof(resp.response_length);
1579 ret = cb(file, &resp, ucore);
1584 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
1586 atomic_inc(&obj->uxrcd->refcnt);
1587 uobj_put_read(xrcd_uobj);
1591 uobj_put_obj_read(pd);
1593 uobj_put_obj_read(scq);
1594 if (rcq && rcq != scq)
1595 uobj_put_obj_read(rcq);
1597 uobj_put_obj_read(srq);
1599 uobj_put_obj_read(ind_tbl);
1601 uobj_alloc_commit(&obj->uevent.uobject);
1608 if (!IS_ERR(xrcd_uobj))
1609 uobj_put_read(xrcd_uobj);
1611 uobj_put_obj_read(pd);
1613 uobj_put_obj_read(scq);
1614 if (rcq && rcq != scq)
1615 uobj_put_obj_read(rcq);
1617 uobj_put_obj_read(srq);
1619 uobj_put_obj_read(ind_tbl);
1621 uobj_alloc_abort(&obj->uevent.uobject);
1625 static int ib_uverbs_create_qp_cb(struct ib_uverbs_file *file,
1626 struct ib_uverbs_ex_create_qp_resp *resp,
1627 struct ib_udata *ucore)
1629 if (ib_copy_to_udata(ucore, &resp->base, sizeof(resp->base)))
1635 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1636 struct ib_device *ib_dev,
1637 const char __user *buf, int in_len,
1640 struct ib_uverbs_create_qp cmd;
1641 struct ib_uverbs_ex_create_qp cmd_ex;
1642 struct ib_udata ucore;
1643 struct ib_udata uhw;
1644 ssize_t resp_size = sizeof(struct ib_uverbs_create_qp_resp);
1647 if (out_len < resp_size)
1650 if (copy_from_user(&cmd, buf, sizeof(cmd)))
1653 INIT_UDATA(&ucore, buf, (unsigned long)cmd.response, sizeof(cmd),
1655 INIT_UDATA(&uhw, buf + sizeof(cmd),
1656 (unsigned long)cmd.response + resp_size,
1657 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
1658 out_len - resp_size);
1660 memset(&cmd_ex, 0, sizeof(cmd_ex));
1661 cmd_ex.user_handle = cmd.user_handle;
1662 cmd_ex.pd_handle = cmd.pd_handle;
1663 cmd_ex.send_cq_handle = cmd.send_cq_handle;
1664 cmd_ex.recv_cq_handle = cmd.recv_cq_handle;
1665 cmd_ex.srq_handle = cmd.srq_handle;
1666 cmd_ex.max_send_wr = cmd.max_send_wr;
1667 cmd_ex.max_recv_wr = cmd.max_recv_wr;
1668 cmd_ex.max_send_sge = cmd.max_send_sge;
1669 cmd_ex.max_recv_sge = cmd.max_recv_sge;
1670 cmd_ex.max_inline_data = cmd.max_inline_data;
1671 cmd_ex.sq_sig_all = cmd.sq_sig_all;
1672 cmd_ex.qp_type = cmd.qp_type;
1673 cmd_ex.is_srq = cmd.is_srq;
1675 err = create_qp(file, &ucore, &uhw, &cmd_ex,
1676 offsetof(typeof(cmd_ex), is_srq) +
1677 sizeof(cmd.is_srq), ib_uverbs_create_qp_cb,
1686 static int ib_uverbs_ex_create_qp_cb(struct ib_uverbs_file *file,
1687 struct ib_uverbs_ex_create_qp_resp *resp,
1688 struct ib_udata *ucore)
1690 if (ib_copy_to_udata(ucore, resp, resp->response_length))
1696 int ib_uverbs_ex_create_qp(struct ib_uverbs_file *file,
1697 struct ib_device *ib_dev,
1698 struct ib_udata *ucore,
1699 struct ib_udata *uhw)
1701 struct ib_uverbs_ex_create_qp_resp resp;
1702 struct ib_uverbs_ex_create_qp cmd = {0};
1705 if (ucore->inlen < (offsetof(typeof(cmd), comp_mask) +
1706 sizeof(cmd.comp_mask)))
1709 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
1713 if (cmd.comp_mask & ~IB_UVERBS_CREATE_QP_SUP_COMP_MASK)
1719 if (ucore->outlen < (offsetof(typeof(resp), response_length) +
1720 sizeof(resp.response_length)))
1723 err = create_qp(file, ucore, uhw, &cmd,
1724 min(ucore->inlen, sizeof(cmd)),
1725 ib_uverbs_ex_create_qp_cb, NULL);
1733 ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
1734 struct ib_device *ib_dev,
1735 const char __user *buf, int in_len, int out_len)
1737 struct ib_uverbs_open_qp cmd;
1738 struct ib_uverbs_create_qp_resp resp;
1739 struct ib_udata udata;
1740 struct ib_uqp_object *obj;
1741 struct ib_xrcd *xrcd;
1742 struct ib_uobject *uninitialized_var(xrcd_uobj);
1744 struct ib_qp_open_attr attr;
1747 if (out_len < sizeof resp)
1750 if (copy_from_user(&cmd, buf, sizeof cmd))
1753 INIT_UDATA(&udata, buf + sizeof(cmd),
1754 (unsigned long) cmd.response + sizeof(resp),
1755 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
1756 out_len - sizeof(resp));
1758 obj = (struct ib_uqp_object *)uobj_alloc(uobj_get_type(qp),
1761 return PTR_ERR(obj);
1763 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd.pd_handle,
1765 if (IS_ERR(xrcd_uobj)) {
1770 xrcd = (struct ib_xrcd *)xrcd_uobj->object;
1776 attr.event_handler = ib_uverbs_qp_event_handler;
1777 attr.qp_context = file;
1778 attr.qp_num = cmd.qpn;
1779 attr.qp_type = cmd.qp_type;
1781 obj->uevent.events_reported = 0;
1782 INIT_LIST_HEAD(&obj->uevent.event_list);
1783 INIT_LIST_HEAD(&obj->mcast_list);
1785 qp = ib_open_qp(xrcd, &attr);
1791 obj->uevent.uobject.object = qp;
1792 obj->uevent.uobject.user_handle = cmd.user_handle;
1794 memset(&resp, 0, sizeof resp);
1795 resp.qpn = qp->qp_num;
1796 resp.qp_handle = obj->uevent.uobject.id;
1798 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1799 &resp, sizeof resp)) {
1804 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
1805 atomic_inc(&obj->uxrcd->refcnt);
1806 qp->uobject = &obj->uevent.uobject;
1807 uobj_put_read(xrcd_uobj);
1810 uobj_alloc_commit(&obj->uevent.uobject);
1817 uobj_put_read(xrcd_uobj);
1819 uobj_alloc_abort(&obj->uevent.uobject);
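/*
 * Translate a kernel rdma_ah_attr into the uverbs qp_dest layout used
 * by QUERY_QP; the GRH fields are only filled in when the address
 * handle is global.
 */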
1823 static void copy_ah_attr_to_uverbs(struct ib_uverbs_qp_dest *uverb_attr,
1824 struct rdma_ah_attr *rdma_attr)
1826 const struct ib_global_route *grh;
1828 uverb_attr->dlid = rdma_ah_get_dlid(rdma_attr);
1829 uverb_attr->sl = rdma_ah_get_sl(rdma_attr);
1830 uverb_attr->src_path_bits = rdma_ah_get_path_bits(rdma_attr);
1831 uverb_attr->static_rate = rdma_ah_get_static_rate(rdma_attr);
1832 uverb_attr->is_global = !!(rdma_ah_get_ah_flags(rdma_attr) &
1834 if (uverb_attr->is_global) {
1835 grh = rdma_ah_read_grh(rdma_attr);
1836 memcpy(uverb_attr->dgid, grh->dgid.raw, 16);
1837 uverb_attr->flow_label = grh->flow_label;
1838 uverb_attr->sgid_index = grh->sgid_index;
1839 uverb_attr->hop_limit = grh->hop_limit;
1840 uverb_attr->traffic_class = grh->traffic_class;
1842 uverb_attr->port_num = rdma_ah_get_port_num(rdma_attr);
1845 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1846 struct ib_device *ib_dev,
1847 const char __user *buf, int in_len,
1850 struct ib_uverbs_query_qp cmd;
1851 struct ib_uverbs_query_qp_resp resp;
1853 struct ib_qp_attr *attr;
1854 struct ib_qp_init_attr *init_attr;
1857 if (copy_from_user(&cmd, buf, sizeof cmd))
1860 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1861 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1862 if (!attr || !init_attr) {
1867 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
1873 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1875 uobj_put_obj_read(qp);
1880 memset(&resp, 0, sizeof resp);
1882 resp.qp_state = attr->qp_state;
1883 resp.cur_qp_state = attr->cur_qp_state;
1884 resp.path_mtu = attr->path_mtu;
1885 resp.path_mig_state = attr->path_mig_state;
1886 resp.qkey = attr->qkey;
1887 resp.rq_psn = attr->rq_psn;
1888 resp.sq_psn = attr->sq_psn;
1889 resp.dest_qp_num = attr->dest_qp_num;
1890 resp.qp_access_flags = attr->qp_access_flags;
1891 resp.pkey_index = attr->pkey_index;
1892 resp.alt_pkey_index = attr->alt_pkey_index;
1893 resp.sq_draining = attr->sq_draining;
1894 resp.max_rd_atomic = attr->max_rd_atomic;
1895 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
1896 resp.min_rnr_timer = attr->min_rnr_timer;
1897 resp.port_num = attr->port_num;
1898 resp.timeout = attr->timeout;
1899 resp.retry_cnt = attr->retry_cnt;
1900 resp.rnr_retry = attr->rnr_retry;
1901 resp.alt_port_num = attr->alt_port_num;
1902 resp.alt_timeout = attr->alt_timeout;
1904 copy_ah_attr_to_uverbs(&resp.dest, &attr->ah_attr);
1905 copy_ah_attr_to_uverbs(&resp.alt_dest, &attr->alt_ah_attr);
1907 resp.max_send_wr = init_attr->cap.max_send_wr;
1908 resp.max_recv_wr = init_attr->cap.max_recv_wr;
1909 resp.max_send_sge = init_attr->cap.max_send_sge;
1910 resp.max_recv_sge = init_attr->cap.max_recv_sge;
1911 resp.max_inline_data = init_attr->cap.max_inline_data;
1912 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1914 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1915 &resp, sizeof resp))
1922 return ret ? ret : in_len;
1925 /* Remove ignored fields set in the attribute mask */
1926 static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
1929 case IB_QPT_XRC_INI:
1930 return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
1931 case IB_QPT_XRC_TGT:
1932 return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
1939 static void copy_ah_attr_from_uverbs(struct ib_device *dev,
1940 struct rdma_ah_attr *rdma_attr,
1941 struct ib_uverbs_qp_dest *uverb_attr)
1943 rdma_attr->type = rdma_ah_find_type(dev, uverb_attr->port_num);
1944 if (uverb_attr->is_global) {
1945 rdma_ah_set_grh(rdma_attr, NULL,
1946 uverb_attr->flow_label,
1947 uverb_attr->sgid_index,
1948 uverb_attr->hop_limit,
1949 uverb_attr->traffic_class);
1950 rdma_ah_set_dgid_raw(rdma_attr, uverb_attr->dgid);
1952 rdma_ah_set_ah_flags(rdma_attr, 0);
1954 rdma_ah_set_dlid(rdma_attr, uverb_attr->dlid);
1955 rdma_ah_set_sl(rdma_attr, uverb_attr->sl);
1956 rdma_ah_set_path_bits(rdma_attr, uverb_attr->src_path_bits);
1957 rdma_ah_set_static_rate(rdma_attr, uverb_attr->static_rate);
1958 rdma_ah_set_port_num(rdma_attr, uverb_attr->port_num);
1959 rdma_ah_set_make_grd(rdma_attr, false);
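/*
 * modify_qp() is shared by MODIFY_QP and EX_MODIFY_QP: it rebuilds a
 * full ib_qp_attr from the user command and lets
 * ib_modify_qp_with_udata() validate the attr_mask after
 * modify_qp_mask() drops bits that do not apply to the QP type.
 */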
1962 static int modify_qp(struct ib_uverbs_file *file,
1963 struct ib_uverbs_ex_modify_qp *cmd, struct ib_udata *udata)
1965 struct ib_qp_attr *attr;
1969 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1973 qp = uobj_get_obj_read(qp, cmd->base.qp_handle, file->ucontext);
1979 if ((cmd->base.attr_mask & IB_QP_PORT) &&
1980 !rdma_is_port_valid(qp->device, cmd->base.port_num)) {
1985 attr->qp_state = cmd->base.qp_state;
1986 attr->cur_qp_state = cmd->base.cur_qp_state;
1987 attr->path_mtu = cmd->base.path_mtu;
1988 attr->path_mig_state = cmd->base.path_mig_state;
1989 attr->qkey = cmd->base.qkey;
1990 attr->rq_psn = cmd->base.rq_psn;
1991 attr->sq_psn = cmd->base.sq_psn;
1992 attr->dest_qp_num = cmd->base.dest_qp_num;
1993 attr->qp_access_flags = cmd->base.qp_access_flags;
1994 attr->pkey_index = cmd->base.pkey_index;
1995 attr->alt_pkey_index = cmd->base.alt_pkey_index;
1996 attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
1997 attr->max_rd_atomic = cmd->base.max_rd_atomic;
1998 attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
1999 attr->min_rnr_timer = cmd->base.min_rnr_timer;
2000 attr->port_num = cmd->base.port_num;
2001 attr->timeout = cmd->base.timeout;
2002 attr->retry_cnt = cmd->base.retry_cnt;
2003 attr->rnr_retry = cmd->base.rnr_retry;
2004 attr->alt_port_num = cmd->base.alt_port_num;
2005 attr->alt_timeout = cmd->base.alt_timeout;
2006 attr->rate_limit = cmd->rate_limit;
2008 if (cmd->base.attr_mask & IB_QP_AV)
2009 copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
2012 if (cmd->base.attr_mask & IB_QP_ALT_PATH)
2013 copy_ah_attr_from_uverbs(qp->device, &attr->alt_ah_attr,
2014 &cmd->base.alt_dest);
2016 ret = ib_modify_qp_with_udata(qp, attr,
2017 modify_qp_mask(qp->qp_type,
2018 cmd->base.attr_mask),
2022 uobj_put_obj_read(qp);
2029 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
2030 struct ib_device *ib_dev,
2031 const char __user *buf, int in_len,
2034 struct ib_uverbs_ex_modify_qp cmd = {};
2035 struct ib_udata udata;
2038 if (copy_from_user(&cmd.base, buf, sizeof(cmd.base)))
2041 if (cmd.base.attr_mask &
2042 ~((IB_USER_LEGACY_LAST_QP_ATTR_MASK << 1) - 1))
2045 INIT_UDATA(&udata, buf + sizeof(cmd.base), NULL,
2046 in_len - sizeof(cmd.base) - sizeof(struct ib_uverbs_cmd_hdr),
2049 ret = modify_qp(file, &cmd, &udata);
2056 int ib_uverbs_ex_modify_qp(struct ib_uverbs_file *file,
2057 struct ib_device *ib_dev,
2058 struct ib_udata *ucore,
2059 struct ib_udata *uhw)
2061 struct ib_uverbs_ex_modify_qp cmd = {};
2065 * Last bit is reserved for extending the attr_mask by
2066 * using another field.
2068 BUILD_BUG_ON(IB_USER_LAST_QP_ATTR_MASK == (1 << 31));
2070 if (ucore->inlen < sizeof(cmd.base))
2073 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2077 if (cmd.base.attr_mask &
2078 ~((IB_USER_LAST_QP_ATTR_MASK << 1) - 1))
2081 if (ucore->inlen > sizeof(cmd)) {
2082 if (ib_is_udata_cleared(ucore, sizeof(cmd),
2083 ucore->inlen - sizeof(cmd)))
2087 ret = modify_qp(file, &cmd, uhw);
2092 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
2093 struct ib_device *ib_dev,
2094 const char __user *buf, int in_len,
2097 struct ib_uverbs_destroy_qp cmd;
2098 struct ib_uverbs_destroy_qp_resp resp;
2099 struct ib_uobject *uobj;
2100 struct ib_uqp_object *obj;
2103 if (copy_from_user(&cmd, buf, sizeof cmd))
2106 memset(&resp, 0, sizeof resp);
2108 uobj = uobj_get_write(uobj_get_type(qp), cmd.qp_handle,
2111 return PTR_ERR(uobj);
2113 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
2115 * Make sure we don't free the memory in remove_commit as we still
2116 * need the uobject memory to create the response.
2118 uverbs_uobject_get(uobj);
2120 ret = uobj_remove_commit(uobj);
2122 uverbs_uobject_put(uobj);
2126 resp.events_reported = obj->uevent.events_reported;
2127 uverbs_uobject_put(uobj);
2129 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2130 &resp, sizeof resp))
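/*
 * alloc_wr() lays a work request out as the SGE-aligned WR structure
 * followed by its scatter/gather array, rejecting num_sge values that
 * would overflow the allocation size.
 */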
2136 static void *alloc_wr(size_t wr_size, __u32 num_sge)
2138 if (num_sge >= (U32_MAX - ALIGN(wr_size, sizeof (struct ib_sge))) /
2139 sizeof (struct ib_sge))
2142 return kmalloc(ALIGN(wr_size, sizeof (struct ib_sge)) +
2143 num_sge * sizeof (struct ib_sge), GFP_KERNEL);
2146 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
2147 struct ib_device *ib_dev,
2148 const char __user *buf, int in_len,
2151 struct ib_uverbs_post_send cmd;
2152 struct ib_uverbs_post_send_resp resp;
2153 struct ib_uverbs_send_wr *user_wr;
2154 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
2158 ssize_t ret = -EINVAL;
2161 if (copy_from_user(&cmd, buf, sizeof cmd))
2164 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
2165 cmd.sge_count * sizeof (struct ib_uverbs_sge))
2168 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
2171 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
2175 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
2179 is_ud = qp->qp_type == IB_QPT_UD;
2182 for (i = 0; i < cmd.wr_count; ++i) {
2183 if (copy_from_user(user_wr,
2184 buf + sizeof cmd + i * cmd.wqe_size,
2190 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
2196 struct ib_ud_wr *ud;
2198 if (user_wr->opcode != IB_WR_SEND &&
2199 user_wr->opcode != IB_WR_SEND_WITH_IMM) {
2204 next_size = sizeof(*ud);
2205 ud = alloc_wr(next_size, user_wr->num_sge);
2211 ud->ah = uobj_get_obj_read(ah, user_wr->wr.ud.ah,
2218 ud->remote_qpn = user_wr->wr.ud.remote_qpn;
2219 ud->remote_qkey = user_wr->wr.ud.remote_qkey;
2222 } else if (user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
2223 user_wr->opcode == IB_WR_RDMA_WRITE ||
2224 user_wr->opcode == IB_WR_RDMA_READ) {
2225 struct ib_rdma_wr *rdma;
2227 next_size = sizeof(*rdma);
2228 rdma = alloc_wr(next_size, user_wr->num_sge);
2234 rdma->remote_addr = user_wr->wr.rdma.remote_addr;
2235 rdma->rkey = user_wr->wr.rdma.rkey;
2238 } else if (user_wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
2239 user_wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2240 struct ib_atomic_wr *atomic;
2242 next_size = sizeof(*atomic);
2243 atomic = alloc_wr(next_size, user_wr->num_sge);
2249 atomic->remote_addr = user_wr->wr.atomic.remote_addr;
2250 atomic->compare_add = user_wr->wr.atomic.compare_add;
2251 atomic->swap = user_wr->wr.atomic.swap;
2252 atomic->rkey = user_wr->wr.atomic.rkey;
2255 } else if (user_wr->opcode == IB_WR_SEND ||
2256 user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2257 user_wr->opcode == IB_WR_SEND_WITH_INV) {
2258 next_size = sizeof(*next);
2259 next = alloc_wr(next_size, user_wr->num_sge);
2269 if (user_wr->opcode == IB_WR_SEND_WITH_IMM ||
2270 user_wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) {
2272 (__be32 __force) user_wr->ex.imm_data;
2273 } else if (user_wr->opcode == IB_WR_SEND_WITH_INV) {
2274 next->ex.invalidate_rkey = user_wr->ex.invalidate_rkey;
2284 next->wr_id = user_wr->wr_id;
2285 next->num_sge = user_wr->num_sge;
2286 next->opcode = user_wr->opcode;
2287 next->send_flags = user_wr->send_flags;
2289 if (next->num_sge) {
2290 next->sg_list = (void *) next +
2291 ALIGN(next_size, sizeof(struct ib_sge));
2292 if (copy_from_user(next->sg_list,
2294 cmd.wr_count * cmd.wqe_size +
2295 sg_ind * sizeof (struct ib_sge),
2296 next->num_sge * sizeof (struct ib_sge))) {
2300 sg_ind += next->num_sge;
2302 next->sg_list = NULL;
2306 ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
2308 for (next = wr; next; next = next->next) {
2314 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2315 &resp, sizeof resp))
2319 uobj_put_obj_read(qp);
2322 if (is_ud && ud_wr(wr)->ah)
2323 uobj_put_obj_read(ud_wr(wr)->ah);
2332 return ret ? ret : in_len;
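/*
 * Unmarshal a user-space chain of receive work requests into ib_recv_wr
 * structures; the same helper serves POST_RECV and POST_SRQ_RECV.  The
 * wire format is wr_count fixed-size WRs followed by one flat array of
 * SGEs that the WRs index in order.
 */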
2335 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
2341 struct ib_uverbs_recv_wr *user_wr;
2342 struct ib_recv_wr *wr = NULL, *last, *next;
2347 if (in_len < wqe_size * wr_count +
2348 sge_count * sizeof (struct ib_uverbs_sge))
2349 return ERR_PTR(-EINVAL);
2351 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
2352 return ERR_PTR(-EINVAL);
2354 user_wr = kmalloc(wqe_size, GFP_KERNEL);
2356 return ERR_PTR(-ENOMEM);
2360 for (i = 0; i < wr_count; ++i) {
2361 if (copy_from_user(user_wr, buf + i * wqe_size,
2367 if (user_wr->num_sge + sg_ind > sge_count) {
2372 if (user_wr->num_sge >=
2373 (U32_MAX - ALIGN(sizeof *next, sizeof (struct ib_sge))) /
2374 sizeof (struct ib_sge)) {
2379 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
2380 user_wr->num_sge * sizeof (struct ib_sge),
2394 next->wr_id = user_wr->wr_id;
2395 next->num_sge = user_wr->num_sge;
2397 if (next->num_sge) {
2398 next->sg_list = (void *) next +
2399 ALIGN(sizeof *next, sizeof (struct ib_sge));
2400 if (copy_from_user(next->sg_list,
2401 buf + wr_count * wqe_size +
2402 sg_ind * sizeof (struct ib_sge),
2403 next->num_sge * sizeof (struct ib_sge))) {
2407 sg_ind += next->num_sge;
2409 next->sg_list = NULL;
2424 return ERR_PTR(ret);
2427 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
2428 struct ib_device *ib_dev,
2429 const char __user *buf, int in_len,
2432 struct ib_uverbs_post_recv cmd;
2433 struct ib_uverbs_post_recv_resp resp;
2434 struct ib_recv_wr *wr, *next, *bad_wr;
2436 ssize_t ret = -EINVAL;
2438 if (copy_from_user(&cmd, buf, sizeof cmd))
2441 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2442 in_len - sizeof cmd, cmd.wr_count,
2443 cmd.sge_count, cmd.wqe_size);
2447 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
2452 ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);
2454 uobj_put_obj_read(qp);
2456 for (next = wr; next; next = next->next) {
2463 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2464 &resp, sizeof resp))
2474 return ret ? ret : in_len;
2477 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
2478 struct ib_device *ib_dev,
2479 const char __user *buf, int in_len,
2482 struct ib_uverbs_post_srq_recv cmd;
2483 struct ib_uverbs_post_srq_recv_resp resp;
2484 struct ib_recv_wr *wr, *next, *bad_wr;
2486 ssize_t ret = -EINVAL;
2488 if (copy_from_user(&cmd, buf, sizeof cmd))
2491 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
2492 in_len - sizeof cmd, cmd.wr_count,
2493 cmd.sge_count, cmd.wqe_size);
2497 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
2502 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
2504 uobj_put_obj_read(srq);
2507 for (next = wr; next; next = next->next) {
2513 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2514 &resp, sizeof resp))
2524 return ret ? ret : in_len;
2527 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
2528 struct ib_device *ib_dev,
2529 const char __user *buf, int in_len,
2532 struct ib_uverbs_create_ah cmd;
2533 struct ib_uverbs_create_ah_resp resp;
2534 struct ib_uobject *uobj;
2537 struct rdma_ah_attr attr;
2539 struct ib_udata udata;
2542 if (out_len < sizeof resp)
2545 if (copy_from_user(&cmd, buf, sizeof cmd))
2548 if (!rdma_is_port_valid(ib_dev, cmd.attr.port_num))
2551 INIT_UDATA(&udata, buf + sizeof(cmd),
2552 (unsigned long)cmd.response + sizeof(resp),
2553 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
2554 out_len - sizeof(resp));
2556 uobj = uobj_alloc(uobj_get_type(ah), file->ucontext);
2558 return PTR_ERR(uobj);
2560 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
2566 attr.type = rdma_ah_find_type(ib_dev, cmd.attr.port_num);
2567 rdma_ah_set_make_grd(&attr, false);
2568 rdma_ah_set_dlid(&attr, cmd.attr.dlid);
2569 rdma_ah_set_sl(&attr, cmd.attr.sl);
2570 rdma_ah_set_path_bits(&attr, cmd.attr.src_path_bits);
2571 rdma_ah_set_static_rate(&attr, cmd.attr.static_rate);
2572 rdma_ah_set_port_num(&attr, cmd.attr.port_num);
2574 if (cmd.attr.is_global) {
2575 rdma_ah_set_grh(&attr, NULL, cmd.attr.grh.flow_label,
2576 cmd.attr.grh.sgid_index,
2577 cmd.attr.grh.hop_limit,
2578 cmd.attr.grh.traffic_class);
2579 rdma_ah_set_dgid_raw(&attr, cmd.attr.grh.dgid);
2581 rdma_ah_set_ah_flags(&attr, 0);
2583 dmac = rdma_ah_retrieve_dmac(&attr);
2585 memset(dmac, 0, ETH_ALEN);
2587 ah = pd->device->create_ah(pd, &attr, &udata);
2594 ah->device = pd->device;
2596 atomic_inc(&pd->usecnt);
2598 uobj->user_handle = cmd.user_handle;
2601 resp.ah_handle = uobj->id;
2603 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2604 &resp, sizeof resp)) {
2609 uobj_put_obj_read(pd);
2610 uobj_alloc_commit(uobj);
2615 rdma_destroy_ah(ah);
2618 uobj_put_obj_read(pd);
2621 uobj_alloc_abort(uobj);
2625 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
2626 struct ib_device *ib_dev,
2627 const char __user *buf, int in_len, int out_len)
2629 struct ib_uverbs_destroy_ah cmd;
2630 struct ib_uobject *uobj;
2633 if (copy_from_user(&cmd, buf, sizeof cmd))
2636 uobj = uobj_get_write(uobj_get_type(ah), cmd.ah_handle,
2639 return PTR_ERR(uobj);
2641 ret = uobj_remove_commit(uobj);
2642 return ret ?: in_len;
2645 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
2646 struct ib_device *ib_dev,
2647 const char __user *buf, int in_len,
2650 struct ib_uverbs_attach_mcast cmd;
2652 struct ib_uqp_object *obj;
2653 struct ib_uverbs_mcast_entry *mcast;
2656 if (copy_from_user(&cmd, buf, sizeof cmd))
2659 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
2663 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2665 mutex_lock(&obj->mcast_lock);
2666 list_for_each_entry(mcast, &obj->mcast_list, list)
2667 if (cmd.mlid == mcast->lid &&
2668 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2673 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
2679 mcast->lid = cmd.mlid;
2680 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
2682 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
2684 list_add_tail(&mcast->list, &obj->mcast_list);
2689 mutex_unlock(&obj->mcast_lock);
2690 uobj_put_obj_read(qp);
2692 return ret ? ret : in_len;
2695 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
2696 struct ib_device *ib_dev,
2697 const char __user *buf, int in_len,
2700 struct ib_uverbs_detach_mcast cmd;
2701 struct ib_uqp_object *obj;
2703 struct ib_uverbs_mcast_entry *mcast;
2707 if (copy_from_user(&cmd, buf, sizeof cmd))
2710 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
2714 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
2715 mutex_lock(&obj->mcast_lock);
2717 list_for_each_entry(mcast, &obj->mcast_list, list)
2718 if (cmd.mlid == mcast->lid &&
2719 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
2720 list_del(&mcast->list);
2731 ret = ib_detach_mcast(qp, (union ib_gid *)cmd.gid, cmd.mlid);
2734 mutex_unlock(&obj->mcast_lock);
2735 uobj_put_obj_read(qp);
2736 return ret ? ret : in_len;
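/*
 * Flow-steering specs arrive as a type/size header followed by a
 * val/mask filter pair.  Action specs (tag, drop) carry no filter;
 * filter specs are converted below, where a user structure larger than
 * the kernel's is tolerated as long as the extra mask bytes are zero.
 */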
2739 static int kern_spec_to_ib_spec_action(struct ib_uverbs_flow_spec *kern_spec,
2740 union ib_flow_spec *ib_spec)
2742 ib_spec->type = kern_spec->type;
2743 switch (ib_spec->type) {
2744 case IB_FLOW_SPEC_ACTION_TAG:
2745 if (kern_spec->flow_tag.size !=
2746 sizeof(struct ib_uverbs_flow_spec_action_tag))
2749 ib_spec->flow_tag.size = sizeof(struct ib_flow_spec_action_tag);
2750 ib_spec->flow_tag.tag_id = kern_spec->flow_tag.tag_id;
2752 case IB_FLOW_SPEC_ACTION_DROP:
2753 if (kern_spec->drop.size !=
2754 sizeof(struct ib_uverbs_flow_spec_action_drop))
2757 ib_spec->drop.size = sizeof(struct ib_flow_spec_action_drop);
2765 static size_t kern_spec_filter_sz(struct ib_uverbs_flow_spec_hdr *spec)
2767 /* Returns the user-space filter size, including padding */
2768 return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
2771 static ssize_t spec_filter_size(void *kern_spec_filter, u16 kern_filter_size,
2772 u16 ib_real_filter_sz)
2775 * User-space filter structures must be 64-bit aligned; otherwise this
2776 * check may pass even though we cannot handle the additional attributes.
2779 if (kern_filter_size > ib_real_filter_sz) {
2780 if (memchr_inv(kern_spec_filter +
2781 ib_real_filter_sz, 0,
2782 kern_filter_size - ib_real_filter_sz))
2784 return ib_real_filter_sz;
2786 return kern_filter_size;
2789 static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
2790 union ib_flow_spec *ib_spec)
2792 ssize_t actual_filter_sz;
2793 ssize_t kern_filter_sz;
2794 ssize_t ib_filter_sz;
2795 void *kern_spec_mask;
2796 void *kern_spec_val;
2798 if (kern_spec->reserved)
2801 ib_spec->type = kern_spec->type;
2803 kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
2804 /* User flow spec size must be aligned to 4 bytes */
2805 if (kern_filter_sz != ALIGN(kern_filter_sz, 4))
2808 kern_spec_val = (void *)kern_spec +
2809 sizeof(struct ib_uverbs_flow_spec_hdr);
2810 kern_spec_mask = kern_spec_val + kern_filter_sz;
2811 if (ib_spec->type == (IB_FLOW_SPEC_INNER | IB_FLOW_SPEC_VXLAN_TUNNEL))
2814 switch (ib_spec->type & ~IB_FLOW_SPEC_INNER) {
2815 case IB_FLOW_SPEC_ETH:
2816 ib_filter_sz = offsetof(struct ib_flow_eth_filter, real_sz);
2817 actual_filter_sz = spec_filter_size(kern_spec_mask,
2820 if (actual_filter_sz <= 0)
2822 ib_spec->size = sizeof(struct ib_flow_spec_eth);
2823 memcpy(&ib_spec->eth.val, kern_spec_val, actual_filter_sz);
2824 memcpy(&ib_spec->eth.mask, kern_spec_mask, actual_filter_sz);
2826 case IB_FLOW_SPEC_IPV4:
2827 ib_filter_sz = offsetof(struct ib_flow_ipv4_filter, real_sz);
2828 actual_filter_sz = spec_filter_size(kern_spec_mask,
2831 if (actual_filter_sz <= 0)
2833 ib_spec->size = sizeof(struct ib_flow_spec_ipv4);
2834 memcpy(&ib_spec->ipv4.val, kern_spec_val, actual_filter_sz);
2835 memcpy(&ib_spec->ipv4.mask, kern_spec_mask, actual_filter_sz);
2837 case IB_FLOW_SPEC_IPV6:
2838 ib_filter_sz = offsetof(struct ib_flow_ipv6_filter, real_sz);
2839 actual_filter_sz = spec_filter_size(kern_spec_mask,
2842 if (actual_filter_sz <= 0)
2844 ib_spec->size = sizeof(struct ib_flow_spec_ipv6);
2845 memcpy(&ib_spec->ipv6.val, kern_spec_val, actual_filter_sz);
2846 memcpy(&ib_spec->ipv6.mask, kern_spec_mask, actual_filter_sz);
2848 if ((ntohl(ib_spec->ipv6.mask.flow_label)) >= BIT(20) ||
2849 (ntohl(ib_spec->ipv6.val.flow_label)) >= BIT(20))
2852 case IB_FLOW_SPEC_TCP:
2853 case IB_FLOW_SPEC_UDP:
2854 ib_filter_sz = offsetof(struct ib_flow_tcp_udp_filter, real_sz);
2855 actual_filter_sz = spec_filter_size(kern_spec_mask,
2858 if (actual_filter_sz <= 0)
2860 ib_spec->size = sizeof(struct ib_flow_spec_tcp_udp);
2861 memcpy(&ib_spec->tcp_udp.val, kern_spec_val, actual_filter_sz);
2862 memcpy(&ib_spec->tcp_udp.mask, kern_spec_mask, actual_filter_sz);
2864 case IB_FLOW_SPEC_VXLAN_TUNNEL:
2865 ib_filter_sz = offsetof(struct ib_flow_tunnel_filter, real_sz);
2866 actual_filter_sz = spec_filter_size(kern_spec_mask,
2869 if (actual_filter_sz <= 0)
2871 ib_spec->tunnel.size = sizeof(struct ib_flow_spec_tunnel);
2872 memcpy(&ib_spec->tunnel.val, kern_spec_val, actual_filter_sz);
2873 memcpy(&ib_spec->tunnel.mask, kern_spec_mask, actual_filter_sz);
2875 if ((ntohl(ib_spec->tunnel.mask.tunnel_id)) >= BIT(24) ||
2876 (ntohl(ib_spec->tunnel.val.tunnel_id)) >= BIT(24))
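/*
 * Dispatch a single user flow spec either to the action converter or to the
 * filter converter, based on its type.
 */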
2885 static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
2886 union ib_flow_spec *ib_spec)
2888 if (kern_spec->reserved)
2891 if (kern_spec->type >= IB_FLOW_SPEC_ACTION_TAG)
2892 return kern_spec_to_ib_spec_action(kern_spec, ib_spec);
2894 return kern_spec_to_ib_spec_filter(kern_spec, ib_spec);
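/*
 * EX_CREATE_WQ: create a work queue (WQ) on the given PD and CQ.  The
 * extended command layout is length-checked so that both older and newer
 * user-space structures are accepted, and the response carries the WQ
 * handle and number together with the granted max_wr/max_sge.
 */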
2897 int ib_uverbs_ex_create_wq(struct ib_uverbs_file *file,
2898 struct ib_device *ib_dev,
2899 struct ib_udata *ucore,
2900 struct ib_udata *uhw)
2902 struct ib_uverbs_ex_create_wq cmd = {};
2903 struct ib_uverbs_ex_create_wq_resp resp = {};
2904 struct ib_uwq_object *obj;
2909 struct ib_wq_init_attr wq_init_attr = {};
2910 size_t required_cmd_sz;
2911 size_t required_resp_len;
2913 required_cmd_sz = offsetof(typeof(cmd), max_sge) + sizeof(cmd.max_sge);
2914 required_resp_len = offsetof(typeof(resp), wqn) + sizeof(resp.wqn);
2916 if (ucore->inlen < required_cmd_sz)
2919 if (ucore->outlen < required_resp_len)
2922 if (ucore->inlen > sizeof(cmd) &&
2923 !ib_is_udata_cleared(ucore, sizeof(cmd),
2924 ucore->inlen - sizeof(cmd)))
2927 err = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
2934 obj = (struct ib_uwq_object *)uobj_alloc(uobj_get_type(wq),
2937 return PTR_ERR(obj);
2939 pd = uobj_get_obj_read(pd, cmd.pd_handle, file->ucontext);
2945 cq = uobj_get_obj_read(cq, cmd.cq_handle, file->ucontext);
2951 wq_init_attr.cq = cq;
2952 wq_init_attr.max_sge = cmd.max_sge;
2953 wq_init_attr.max_wr = cmd.max_wr;
2954 wq_init_attr.wq_context = file;
2955 wq_init_attr.wq_type = cmd.wq_type;
2956 wq_init_attr.event_handler = ib_uverbs_wq_event_handler;
2957 if (ucore->inlen >= (offsetof(typeof(cmd), create_flags) +
2958 sizeof(cmd.create_flags)))
2959 wq_init_attr.create_flags = cmd.create_flags;
2960 obj->uevent.events_reported = 0;
2961 INIT_LIST_HEAD(&obj->uevent.event_list);
2962 wq = pd->device->create_wq(pd, &wq_init_attr, uhw);
2968 wq->uobject = &obj->uevent.uobject;
2969 obj->uevent.uobject.object = wq;
2970 wq->wq_type = wq_init_attr.wq_type;
2973 wq->device = pd->device;
2974 wq->wq_context = wq_init_attr.wq_context;
2975 atomic_set(&wq->usecnt, 0);
2976 atomic_inc(&pd->usecnt);
2977 atomic_inc(&cq->usecnt);
2981 memset(&resp, 0, sizeof(resp));
2982 resp.wq_handle = obj->uevent.uobject.id;
2983 resp.max_sge = wq_init_attr.max_sge;
2984 resp.max_wr = wq_init_attr.max_wr;
2985 resp.wqn = wq->wq_num;
2986 resp.response_length = required_resp_len;
2987 err = ib_copy_to_udata(ucore,
2988 &resp, resp.response_length);
2992 uobj_put_obj_read(pd);
2993 uobj_put_obj_read(cq);
2994 uobj_alloc_commit(&obj->uevent.uobject);
3000 uobj_put_obj_read(cq);
3002 uobj_put_obj_read(pd);
3004 uobj_alloc_abort(&obj->uevent.uobject);
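/*
 * EX_DESTROY_WQ: destroy a WQ by handle.  The uobject is held across
 * uobj_remove_commit() so that events_reported can still be copied into the
 * response after the WQ itself is gone.
 */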
3009 int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file,
3010 struct ib_device *ib_dev,
3011 struct ib_udata *ucore,
3012 struct ib_udata *uhw)
3014 struct ib_uverbs_ex_destroy_wq cmd = {};
3015 struct ib_uverbs_ex_destroy_wq_resp resp = {};
3016 struct ib_uobject *uobj;
3017 struct ib_uwq_object *obj;
3018 size_t required_cmd_sz;
3019 size_t required_resp_len;
3022 required_cmd_sz = offsetof(typeof(cmd), wq_handle) + sizeof(cmd.wq_handle);
3023 required_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved);
3025 if (ucore->inlen < required_cmd_sz)
3028 if (ucore->outlen < required_resp_len)
3031 if (ucore->inlen > sizeof(cmd) &&
3032 !ib_is_udata_cleared(ucore, sizeof(cmd),
3033 ucore->inlen - sizeof(cmd)))
3036 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3043 resp.response_length = required_resp_len;
3044 uobj = uobj_get_write(uobj_get_type(wq), cmd.wq_handle,
3047 return PTR_ERR(uobj);
3049 obj = container_of(uobj, struct ib_uwq_object, uevent.uobject);
3051 * Make sure we don't free the memory in remove_commit as we still
3052	 * need the uobject memory to create the response.
3054 uverbs_uobject_get(uobj);
3056 ret = uobj_remove_commit(uobj);
3057 resp.events_reported = obj->uevent.events_reported;
3058 uverbs_uobject_put(uobj);
3062 return ib_copy_to_udata(ucore, &resp, resp.response_length);
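/*
 * EX_MODIFY_WQ: change WQ state and/or flags.  Only the attribute bits in
 * IB_WQ_STATE, IB_WQ_CUR_STATE and IB_WQ_FLAGS are accepted; anything else
 * is rejected before the driver's modify_wq hook is called.
 */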
3065 int ib_uverbs_ex_modify_wq(struct ib_uverbs_file *file,
3066 struct ib_device *ib_dev,
3067 struct ib_udata *ucore,
3068 struct ib_udata *uhw)
3070 struct ib_uverbs_ex_modify_wq cmd = {};
3072 struct ib_wq_attr wq_attr = {};
3073 size_t required_cmd_sz;
3076 required_cmd_sz = offsetof(typeof(cmd), curr_wq_state) + sizeof(cmd.curr_wq_state);
3077 if (ucore->inlen < required_cmd_sz)
3080 if (ucore->inlen > sizeof(cmd) &&
3081 !ib_is_udata_cleared(ucore, sizeof(cmd),
3082 ucore->inlen - sizeof(cmd)))
3085 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3092 if (cmd.attr_mask > (IB_WQ_STATE | IB_WQ_CUR_STATE | IB_WQ_FLAGS))
3095 wq = uobj_get_obj_read(wq, cmd.wq_handle, file->ucontext);
3099 wq_attr.curr_wq_state = cmd.curr_wq_state;
3100 wq_attr.wq_state = cmd.wq_state;
3101 if (cmd.attr_mask & IB_WQ_FLAGS) {
3102 wq_attr.flags = cmd.flags;
3103 wq_attr.flags_mask = cmd.flags_mask;
3105 ret = wq->device->modify_wq(wq, &wq_attr, cmd.attr_mask, uhw);
3106 uobj_put_obj_read(wq);
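/*
 * EX_CREATE_RWQ_IND_TABLE: build an RSS indirection table from an array of
 * WQ handles.  The table has 1 << log_ind_tbl_size entries; each handle is
 * resolved to its WQ (taking a read reference) before the driver's
 * create_rwq_ind_table hook runs, and every referenced WQ's usecnt is
 * bumped on success.
 */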
3110 int ib_uverbs_ex_create_rwq_ind_table(struct ib_uverbs_file *file,
3111 struct ib_device *ib_dev,
3112 struct ib_udata *ucore,
3113 struct ib_udata *uhw)
3115 struct ib_uverbs_ex_create_rwq_ind_table cmd = {};
3116 struct ib_uverbs_ex_create_rwq_ind_table_resp resp = {};
3117 struct ib_uobject *uobj;
3119 struct ib_rwq_ind_table_init_attr init_attr = {};
3120 struct ib_rwq_ind_table *rwq_ind_tbl;
3121 struct ib_wq **wqs = NULL;
3122 u32 *wqs_handles = NULL;
3123 struct ib_wq *wq = NULL;
3124 int i, j, num_read_wqs;
3126 u32 expected_in_size;
3127 size_t required_cmd_sz_header;
3128 size_t required_resp_len;
3130 required_cmd_sz_header = offsetof(typeof(cmd), log_ind_tbl_size) + sizeof(cmd.log_ind_tbl_size);
3131 required_resp_len = offsetof(typeof(resp), ind_tbl_num) + sizeof(resp.ind_tbl_num);
3133 if (ucore->inlen < required_cmd_sz_header)
3136 if (ucore->outlen < required_resp_len)
3139 err = ib_copy_from_udata(&cmd, ucore, required_cmd_sz_header);
3143 ucore->inbuf += required_cmd_sz_header;
3144 ucore->inlen -= required_cmd_sz_header;
3149 if (cmd.log_ind_tbl_size > IB_USER_VERBS_MAX_LOG_IND_TBL_SIZE)
3152 num_wq_handles = 1 << cmd.log_ind_tbl_size;
3153 expected_in_size = num_wq_handles * sizeof(__u32);
3154 if (num_wq_handles == 1)
3155 /* input size for wq handles is u64 aligned */
3156 expected_in_size += sizeof(__u32);
3158 if (ucore->inlen < expected_in_size)
3161 if (ucore->inlen > expected_in_size &&
3162 !ib_is_udata_cleared(ucore, expected_in_size,
3163 ucore->inlen - expected_in_size))
3166 wqs_handles = kcalloc(num_wq_handles, sizeof(*wqs_handles),
3171 err = ib_copy_from_udata(wqs_handles, ucore,
3172 num_wq_handles * sizeof(__u32));
3176 wqs = kcalloc(num_wq_handles, sizeof(*wqs), GFP_KERNEL);
3182 for (num_read_wqs = 0; num_read_wqs < num_wq_handles;
3184 wq = uobj_get_obj_read(wq, wqs_handles[num_read_wqs],
3191 wqs[num_read_wqs] = wq;
3194 uobj = uobj_alloc(uobj_get_type(rwq_ind_table), file->ucontext);
3196 err = PTR_ERR(uobj);
3200 init_attr.log_ind_tbl_size = cmd.log_ind_tbl_size;
3201 init_attr.ind_tbl = wqs;
3202 rwq_ind_tbl = ib_dev->create_rwq_ind_table(ib_dev, &init_attr, uhw);
3204 if (IS_ERR(rwq_ind_tbl)) {
3205 err = PTR_ERR(rwq_ind_tbl);
3209 rwq_ind_tbl->ind_tbl = wqs;
3210 rwq_ind_tbl->log_ind_tbl_size = init_attr.log_ind_tbl_size;
3211 rwq_ind_tbl->uobject = uobj;
3212 uobj->object = rwq_ind_tbl;
3213 rwq_ind_tbl->device = ib_dev;
3214 atomic_set(&rwq_ind_tbl->usecnt, 0);
3216 for (i = 0; i < num_wq_handles; i++)
3217 atomic_inc(&wqs[i]->usecnt);
3219 resp.ind_tbl_handle = uobj->id;
3220 resp.ind_tbl_num = rwq_ind_tbl->ind_tbl_num;
3221 resp.response_length = required_resp_len;
3223 err = ib_copy_to_udata(ucore,
3224 &resp, resp.response_length);
3230 for (j = 0; j < num_read_wqs; j++)
3231 uobj_put_obj_read(wqs[j]);
3233 uobj_alloc_commit(uobj);
3237 ib_destroy_rwq_ind_table(rwq_ind_tbl);
3239 uobj_alloc_abort(uobj);
3241 for (j = 0; j < num_read_wqs; j++)
3242 uobj_put_obj_read(wqs[j]);
3249 int ib_uverbs_ex_destroy_rwq_ind_table(struct ib_uverbs_file *file,
3250 struct ib_device *ib_dev,
3251 struct ib_udata *ucore,
3252 struct ib_udata *uhw)
3254 struct ib_uverbs_ex_destroy_rwq_ind_table cmd = {};
3255 struct ib_uobject *uobj;
3257 size_t required_cmd_sz;
3259 required_cmd_sz = offsetof(typeof(cmd), ind_tbl_handle) + sizeof(cmd.ind_tbl_handle);
3261 if (ucore->inlen < required_cmd_sz)
3264 if (ucore->inlen > sizeof(cmd) &&
3265 !ib_is_udata_cleared(ucore, sizeof(cmd),
3266 ucore->inlen - sizeof(cmd)))
3269 ret = ib_copy_from_udata(&cmd, ucore, min(sizeof(cmd), ucore->inlen));
3276 uobj = uobj_get_write(uobj_get_type(rwq_ind_table), cmd.ind_tbl_handle,
3279 return PTR_ERR(uobj);
3281 return uobj_remove_commit(uobj);
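/*
 * EX_CREATE_FLOW: install a flow steering rule on a QP.  Requires
 * CAP_NET_RAW; the variable-length array of user flow specs is converted
 * one by one with kern_spec_to_ib_spec() into a kernel ib_flow_attr before
 * ib_create_flow() is called in the user domain.
 */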
3284 int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
3285 struct ib_device *ib_dev,
3286 struct ib_udata *ucore,
3287 struct ib_udata *uhw)
3289 struct ib_uverbs_create_flow cmd;
3290 struct ib_uverbs_create_flow_resp resp;
3291 struct ib_uobject *uobj;
3292 struct ib_flow *flow_id;
3293 struct ib_uverbs_flow_attr *kern_flow_attr;
3294 struct ib_flow_attr *flow_attr;
3301 if (ucore->inlen < sizeof(cmd))
3304 if (ucore->outlen < sizeof(resp))
3307 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3311 ucore->inbuf += sizeof(cmd);
3312 ucore->inlen -= sizeof(cmd);
3317 if (!capable(CAP_NET_RAW))
3320 if (cmd.flow_attr.flags >= IB_FLOW_ATTR_FLAGS_RESERVED)
3323 if ((cmd.flow_attr.flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
3324 ((cmd.flow_attr.type == IB_FLOW_ATTR_ALL_DEFAULT) ||
3325 (cmd.flow_attr.type == IB_FLOW_ATTR_MC_DEFAULT)))
3328 if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
3331 if (cmd.flow_attr.size > ucore->inlen ||
3332 cmd.flow_attr.size >
3333 (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
3336 if (cmd.flow_attr.reserved[0] ||
3337 cmd.flow_attr.reserved[1])
3340 if (cmd.flow_attr.num_of_specs) {
3341 kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
3343 if (!kern_flow_attr)
3346 memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
3347 err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
3348 cmd.flow_attr.size);
3352 kern_flow_attr = &cmd.flow_attr;
3355 uobj = uobj_alloc(uobj_get_type(flow), file->ucontext);
3357 err = PTR_ERR(uobj);
3361 qp = uobj_get_obj_read(qp, cmd.qp_handle, file->ucontext);
3367 flow_attr = kzalloc(sizeof(*flow_attr) + cmd.flow_attr.num_of_specs *
3368 sizeof(union ib_flow_spec), GFP_KERNEL);
3374 flow_attr->type = kern_flow_attr->type;
3375 flow_attr->priority = kern_flow_attr->priority;
3376 flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
3377 flow_attr->port = kern_flow_attr->port;
3378 flow_attr->flags = kern_flow_attr->flags;
3379 flow_attr->size = sizeof(*flow_attr);
3381 kern_spec = kern_flow_attr + 1;
3382 ib_spec = flow_attr + 1;
3383 for (i = 0; i < flow_attr->num_of_specs &&
3384 cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
3385 cmd.flow_attr.size >=
3386 ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
3387 err = kern_spec_to_ib_spec(kern_spec, ib_spec);
3391 ((union ib_flow_spec *) ib_spec)->size;
3392 cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
3393 kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
3394 ib_spec += ((union ib_flow_spec *) ib_spec)->size;
3396 if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
3397 pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
3398 i, cmd.flow_attr.size);
3402 flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
3403 if (IS_ERR(flow_id)) {
3404 err = PTR_ERR(flow_id);
3407 flow_id->uobject = uobj;
3408 uobj->object = flow_id;
3410 memset(&resp, 0, sizeof(resp));
3411 resp.flow_handle = uobj->id;
3413 err = ib_copy_to_udata(ucore,
3414 &resp, sizeof(resp));
3418 uobj_put_obj_read(qp);
3419 uobj_alloc_commit(uobj);
3421 if (cmd.flow_attr.num_of_specs)
3422 kfree(kern_flow_attr);
3425 ib_destroy_flow(flow_id);
3429 uobj_put_obj_read(qp);
3431 uobj_alloc_abort(uobj);
3433 if (cmd.flow_attr.num_of_specs)
3434 kfree(kern_flow_attr);
3438 int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
3439 struct ib_device *ib_dev,
3440 struct ib_udata *ucore,
3441 struct ib_udata *uhw)
3443 struct ib_uverbs_destroy_flow cmd;
3444 struct ib_uobject *uobj;
3447 if (ucore->inlen < sizeof(cmd))
3450 ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3457 uobj = uobj_get_write(uobj_get_type(flow), cmd.flow_handle,
3460 return PTR_ERR(uobj);
3462 ret = uobj_remove_commit(uobj);
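/*
 * Common SRQ creation path shared by the basic and extended commands.  Tag
 * matching SRQs take max_num_tags, XRC SRQs look up and hold the XRCD
 * uobject, and SRQ types with a completion queue look up the CQ as well.
 * On success the new SRQ takes usecnt references on the PD and, depending
 * on type, on the CQ and XRCD.
 */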
3466 static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
3467 struct ib_device *ib_dev,
3468 struct ib_uverbs_create_xsrq *cmd,
3469 struct ib_udata *udata)
3471 struct ib_uverbs_create_srq_resp resp;
3472 struct ib_usrq_object *obj;
3475 struct ib_uobject *uninitialized_var(xrcd_uobj);
3476 struct ib_srq_init_attr attr;
3479 obj = (struct ib_usrq_object *)uobj_alloc(uobj_get_type(srq),
3482 return PTR_ERR(obj);
3484 if (cmd->srq_type == IB_SRQT_TM)
3485 attr.ext.tag_matching.max_num_tags = cmd->max_num_tags;
3487 if (cmd->srq_type == IB_SRQT_XRC) {
3488 xrcd_uobj = uobj_get_read(uobj_get_type(xrcd), cmd->xrcd_handle,
3490 if (IS_ERR(xrcd_uobj)) {
3495 attr.ext.xrc.xrcd = (struct ib_xrcd *)xrcd_uobj->object;
3496 if (!attr.ext.xrc.xrcd) {
3501 obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
3502 atomic_inc(&obj->uxrcd->refcnt);
3505 if (ib_srq_has_cq(cmd->srq_type)) {
3506 attr.ext.cq = uobj_get_obj_read(cq, cmd->cq_handle,
3514 pd = uobj_get_obj_read(pd, cmd->pd_handle, file->ucontext);
3520 attr.event_handler = ib_uverbs_srq_event_handler;
3521 attr.srq_context = file;
3522 attr.srq_type = cmd->srq_type;
3523 attr.attr.max_wr = cmd->max_wr;
3524 attr.attr.max_sge = cmd->max_sge;
3525 attr.attr.srq_limit = cmd->srq_limit;
3527 obj->uevent.events_reported = 0;
3528 INIT_LIST_HEAD(&obj->uevent.event_list);
3530 srq = pd->device->create_srq(pd, &attr, udata);
3536 srq->device = pd->device;
3538 srq->srq_type = cmd->srq_type;
3539 srq->uobject = &obj->uevent.uobject;
3540 srq->event_handler = attr.event_handler;
3541 srq->srq_context = attr.srq_context;
3543 if (ib_srq_has_cq(cmd->srq_type)) {
3544 srq->ext.cq = attr.ext.cq;
3545 atomic_inc(&attr.ext.cq->usecnt);
3548 if (cmd->srq_type == IB_SRQT_XRC) {
3549 srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
3550 atomic_inc(&attr.ext.xrc.xrcd->usecnt);
3553 atomic_inc(&pd->usecnt);
3554 atomic_set(&srq->usecnt, 0);
3556 obj->uevent.uobject.object = srq;
3557 obj->uevent.uobject.user_handle = cmd->user_handle;
3559 memset(&resp, 0, sizeof resp);
3560 resp.srq_handle = obj->uevent.uobject.id;
3561 resp.max_wr = attr.attr.max_wr;
3562 resp.max_sge = attr.attr.max_sge;
3563 if (cmd->srq_type == IB_SRQT_XRC)
3564 resp.srqn = srq->ext.xrc.srq_num;
3566 if (copy_to_user((void __user *) (unsigned long) cmd->response,
3567 &resp, sizeof resp)) {
3572 if (cmd->srq_type == IB_SRQT_XRC)
3573 uobj_put_read(xrcd_uobj);
3575 if (ib_srq_has_cq(cmd->srq_type))
3576 uobj_put_obj_read(attr.ext.cq);
3578 uobj_put_obj_read(pd);
3579 uobj_alloc_commit(&obj->uevent.uobject);
3584 ib_destroy_srq(srq);
3587 uobj_put_obj_read(pd);
3590 if (ib_srq_has_cq(cmd->srq_type))
3591 uobj_put_obj_read(attr.ext.cq);
3594 if (cmd->srq_type == IB_SRQT_XRC) {
3595 atomic_dec(&obj->uxrcd->refcnt);
3596 uobj_put_read(xrcd_uobj);
3600 uobj_alloc_abort(&obj->uevent.uobject);
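/*
 * CREATE_SRQ: legacy command for a basic SRQ.  The old-style command is
 * repacked into an ib_uverbs_create_xsrq with srq_type = IB_SRQT_BASIC and
 * handed to __uverbs_create_xsrq().
 */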
3604 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
3605 struct ib_device *ib_dev,
3606 const char __user *buf, int in_len,
3609 struct ib_uverbs_create_srq cmd;
3610 struct ib_uverbs_create_xsrq xcmd;
3611 struct ib_uverbs_create_srq_resp resp;
3612 struct ib_udata udata;
3615 if (out_len < sizeof resp)
3618 if (copy_from_user(&cmd, buf, sizeof cmd))
3621 memset(&xcmd, 0, sizeof(xcmd));
3622 xcmd.response = cmd.response;
3623 xcmd.user_handle = cmd.user_handle;
3624 xcmd.srq_type = IB_SRQT_BASIC;
3625 xcmd.pd_handle = cmd.pd_handle;
3626 xcmd.max_wr = cmd.max_wr;
3627 xcmd.max_sge = cmd.max_sge;
3628 xcmd.srq_limit = cmd.srq_limit;
3630 INIT_UDATA(&udata, buf + sizeof(cmd),
3631 (unsigned long) cmd.response + sizeof(resp),
3632 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3633 out_len - sizeof(resp));
3635 ret = __uverbs_create_xsrq(file, ib_dev, &xcmd, &udata);
3642 ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
3643 struct ib_device *ib_dev,
3644 const char __user *buf, int in_len, int out_len)
3646 struct ib_uverbs_create_xsrq cmd;
3647 struct ib_uverbs_create_srq_resp resp;
3648 struct ib_udata udata;
3651 if (out_len < sizeof resp)
3654 if (copy_from_user(&cmd, buf, sizeof cmd))
3657 INIT_UDATA(&udata, buf + sizeof(cmd),
3658 (unsigned long) cmd.response + sizeof(resp),
3659 in_len - sizeof(cmd) - sizeof(struct ib_uverbs_cmd_hdr),
3660 out_len - sizeof(resp));
3662 ret = __uverbs_create_xsrq(file, ib_dev, &cmd, &udata);
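/*
 * MODIFY_SRQ: update max_wr and/or srq_limit through the driver's
 * modify_srq hook, according to cmd.attr_mask.
 */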
3669 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
3670 struct ib_device *ib_dev,
3671 const char __user *buf, int in_len,
3674 struct ib_uverbs_modify_srq cmd;
3675 struct ib_udata udata;
3677 struct ib_srq_attr attr;
3680 if (copy_from_user(&cmd, buf, sizeof cmd))
3683 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
3686 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
3690 attr.max_wr = cmd.max_wr;
3691 attr.srq_limit = cmd.srq_limit;
3693 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
3695 uobj_put_obj_read(srq);
3697 return ret ? ret : in_len;
3700 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
3701 struct ib_device *ib_dev,
3702 const char __user *buf,
3703 int in_len, int out_len)
3705 struct ib_uverbs_query_srq cmd;
3706 struct ib_uverbs_query_srq_resp resp;
3707 struct ib_srq_attr attr;
3711 if (out_len < sizeof resp)
3714 if (copy_from_user(&cmd, buf, sizeof cmd))
3717 srq = uobj_get_obj_read(srq, cmd.srq_handle, file->ucontext);
3721 ret = ib_query_srq(srq, &attr);
3723 uobj_put_obj_read(srq);
3728 memset(&resp, 0, sizeof resp);
3730 resp.max_wr = attr.max_wr;
3731 resp.max_sge = attr.max_sge;
3732 resp.srq_limit = attr.srq_limit;
3734 if (copy_to_user((void __user *) (unsigned long) cmd.response,
3735 &resp, sizeof resp))
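/*
 * DESTROY_SRQ: destroy an SRQ by handle.  As with WQ destruction, the
 * uobject is kept alive across uobj_remove_commit() so that events_reported
 * can still be returned to user space.
 */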
3741 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
3742 struct ib_device *ib_dev,
3743 const char __user *buf, int in_len,
3746 struct ib_uverbs_destroy_srq cmd;
3747 struct ib_uverbs_destroy_srq_resp resp;
3748 struct ib_uobject *uobj;
3749 struct ib_uevent_object *obj;
3752 if (copy_from_user(&cmd, buf, sizeof cmd))
3755 uobj = uobj_get_write(uobj_get_type(srq), cmd.srq_handle,
3758 return PTR_ERR(uobj);
3760 obj = container_of(uobj, struct ib_uevent_object, uobject);
3762 * Make sure we don't free the memory in remove_commit as we still
3763	 * need the uobject memory to create the response.
3765 uverbs_uobject_get(uobj);
3767 memset(&resp, 0, sizeof(resp));
3769 ret = uobj_remove_commit(uobj);
3771 uverbs_uobject_put(uobj);
3774 resp.events_reported = obj->events_reported;
3775 uverbs_uobject_put(uobj);
3776 if (copy_to_user((void __user *)(unsigned long)cmd.response,
3777 &resp, sizeof(resp)))
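/*
 * EX_QUERY_DEVICE: extended device query.  Each optional capability block
 * (ODP caps, timestamp mask, core clock, extended cap flags, RSS caps,
 * raw packet caps, XRQ/tag-matching caps) is appended to the response only
 * if user space supplied enough output space, and response_length reports
 * how much was actually filled in.
 */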
3783 int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
3784 struct ib_device *ib_dev,
3785 struct ib_udata *ucore,
3786 struct ib_udata *uhw)
3788 struct ib_uverbs_ex_query_device_resp resp = { {0} };
3789 struct ib_uverbs_ex_query_device cmd;
3790 struct ib_device_attr attr = {0};
3793 if (ucore->inlen < sizeof(cmd))
3796 err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
3806 resp.response_length = offsetof(typeof(resp), odp_caps);
3808 if (ucore->outlen < resp.response_length)
3811 err = ib_dev->query_device(ib_dev, &attr, uhw);
3815 copy_query_dev_fields(file, ib_dev, &resp.base, &attr);
3817 if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
3820 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
3821 resp.odp_caps.general_caps = attr.odp_caps.general_caps;
3822 resp.odp_caps.per_transport_caps.rc_odp_caps =
3823 attr.odp_caps.per_transport_caps.rc_odp_caps;
3824 resp.odp_caps.per_transport_caps.uc_odp_caps =
3825 attr.odp_caps.per_transport_caps.uc_odp_caps;
3826 resp.odp_caps.per_transport_caps.ud_odp_caps =
3827 attr.odp_caps.per_transport_caps.ud_odp_caps;
3829 resp.response_length += sizeof(resp.odp_caps);
3831 if (ucore->outlen < resp.response_length + sizeof(resp.timestamp_mask))
3834 resp.timestamp_mask = attr.timestamp_mask;
3835 resp.response_length += sizeof(resp.timestamp_mask);
3837 if (ucore->outlen < resp.response_length + sizeof(resp.hca_core_clock))
3840 resp.hca_core_clock = attr.hca_core_clock;
3841 resp.response_length += sizeof(resp.hca_core_clock);
3843 if (ucore->outlen < resp.response_length + sizeof(resp.device_cap_flags_ex))
3846 resp.device_cap_flags_ex = attr.device_cap_flags;
3847 resp.response_length += sizeof(resp.device_cap_flags_ex);
3849 if (ucore->outlen < resp.response_length + sizeof(resp.rss_caps))
3852 resp.rss_caps.supported_qpts = attr.rss_caps.supported_qpts;
3853 resp.rss_caps.max_rwq_indirection_tables =
3854 attr.rss_caps.max_rwq_indirection_tables;
3855 resp.rss_caps.max_rwq_indirection_table_size =
3856 attr.rss_caps.max_rwq_indirection_table_size;
3858 resp.response_length += sizeof(resp.rss_caps);
3860 if (ucore->outlen < resp.response_length + sizeof(resp.max_wq_type_rq))
3863 resp.max_wq_type_rq = attr.max_wq_type_rq;
3864 resp.response_length += sizeof(resp.max_wq_type_rq);
3866 if (ucore->outlen < resp.response_length + sizeof(resp.raw_packet_caps))
3869 resp.raw_packet_caps = attr.raw_packet_caps;
3870 resp.response_length += sizeof(resp.raw_packet_caps);
3872 if (ucore->outlen < resp.response_length + sizeof(resp.xrq_caps))
3875 resp.xrq_caps.max_rndv_hdr_size = attr.xrq_caps.max_rndv_hdr_size;
3876 resp.xrq_caps.max_num_tags = attr.xrq_caps.max_num_tags;
3877 resp.xrq_caps.max_ops = attr.xrq_caps.max_ops;
3878 resp.xrq_caps.max_sge = attr.xrq_caps.max_sge;
3879 resp.xrq_caps.flags = attr.xrq_caps.flags;
3880 resp.response_length += sizeof(resp.xrq_caps);
3882 err = ib_copy_to_udata(ucore, &resp, resp.response_length);