// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <rdma/uverbs_ioctl.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_task.h"

static int rxe_qp_chk_cap(struct rxe_dev *rxe, struct ib_qp_cap *cap,
                          int has_srq)
{
        if (cap->max_send_wr > rxe->attr.max_qp_wr) {
                rxe_dbg_dev(rxe, "invalid send wr = %u > %d\n",
                            cap->max_send_wr, rxe->attr.max_qp_wr);
                goto err1;
        }

        if (cap->max_send_sge > rxe->attr.max_send_sge) {
                rxe_dbg_dev(rxe, "invalid send sge = %u > %d\n",
                            cap->max_send_sge, rxe->attr.max_send_sge);
                goto err1;
        }

        if (!has_srq) {
                if (cap->max_recv_wr > rxe->attr.max_qp_wr) {
                        rxe_dbg_dev(rxe, "invalid recv wr = %u > %d\n",
                                    cap->max_recv_wr, rxe->attr.max_qp_wr);
                        goto err1;
                }

                if (cap->max_recv_sge > rxe->attr.max_recv_sge) {
                        rxe_dbg_dev(rxe, "invalid recv sge = %u > %d\n",
                                    cap->max_recv_sge, rxe->attr.max_recv_sge);
                        goto err1;
                }
        }

        if (cap->max_inline_data > rxe->max_inline_data) {
                rxe_dbg_dev(rxe, "invalid max inline data = %u > %d\n",
                            cap->max_inline_data, rxe->max_inline_data);
                goto err1;
        }

        return 0;

err1:
        return -EINVAL;
}

int rxe_qp_chk_init(struct rxe_dev *rxe, struct ib_qp_init_attr *init)
{
        struct ib_qp_cap *cap = &init->cap;
        struct rxe_port *port;
        int port_num = init->port_num;

        switch (init->qp_type) {
        case IB_QPT_GSI:
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (!init->recv_cq || !init->send_cq) {
                rxe_dbg_dev(rxe, "missing cq\n");
                goto err1;
        }

        if (rxe_qp_chk_cap(rxe, cap, !!init->srq))
                goto err1;

        if (init->qp_type == IB_QPT_GSI) {
                if (!rdma_is_port_valid(&rxe->ib_dev, port_num)) {
                        rxe_dbg_dev(rxe, "invalid port = %d\n", port_num);
                        goto err1;
                }

                port = &rxe->port;
                if (port->qp_gsi_index) {
                        rxe_dbg_dev(rxe, "GSI QP exists for port %d\n", port_num);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
{
        qp->resp.res_head = 0;
        qp->resp.res_tail = 0;
        qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

        if (!qp->resp.resources)
                return -ENOMEM;

        return 0;
}

static void free_rd_atomic_resources(struct rxe_qp *qp)
{
        if (qp->resp.resources) {
                int i;

                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        struct resp_res *res = &qp->resp.resources[i];

                        free_rd_atomic_resource(res);
                }
                kfree(qp->resp.resources);
                qp->resp.resources = NULL;
        }
}

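/* A note on "freeing" a responder resource (an inference from the code
 * below, not spelled out in the source): a resp_res entry owns no
 * separately allocated memory in this implementation, so releasing one
 * amounts to clearing its type, which marks the slot as unused and
 * available for reuse by the responder.
 */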
void free_rd_atomic_resource(struct resp_res *res)
{
        res->type = 0;
}

static void cleanup_rd_atomic_resources(struct rxe_qp *qp)
{
        int i;
        struct resp_res *res;

        if (qp->resp.resources) {
                for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                        res = &qp->resp.resources[i];
                        free_rd_atomic_resource(res);
                }
        }
}

static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
                             struct ib_qp_init_attr *init)
{
        struct rxe_port *port;
        u32 qpn;

        qp->sq_sig_type         = init->sq_sig_type;
        qp->attr.path_mtu       = 1;    /* 1 == IB_MTU_256 */
        qp->mtu                 = ib_mtu_enum_to_int(qp->attr.path_mtu);

        qpn                     = qp->elem.index;
        port                    = &rxe->port;

        switch (init->qp_type) {
        case IB_QPT_GSI:
                qp->ibqp.qp_num         = 1;    /* the GSI QP is always QP1 */
                port->qp_gsi_index      = qpn;
                qp->attr.port_num       = init->port_num;
                break;

        default:
                qp->ibqp.qp_num         = qpn;
                break;
        }

        spin_lock_init(&qp->state_lock);

        spin_lock_init(&qp->sq.sq_lock);
        spin_lock_init(&qp->rq.producer_lock);
        spin_lock_init(&qp->rq.consumer_lock);

        skb_queue_head_init(&qp->req_pkts);
        skb_queue_head_init(&qp->resp_pkts);

        atomic_set(&qp->ssn, 0);
        atomic_set(&qp->skb_out, 0);
}

static int rxe_init_sq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
                       struct ib_udata *udata,
                       struct rxe_create_qp_resp __user *uresp)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        int wqe_size;
        int err;

        qp->sq.max_wr = init->cap.max_send_wr;
        wqe_size = max_t(int, init->cap.max_send_sge * sizeof(struct ib_sge),
                         init->cap.max_inline_data);
        qp->sq.max_sge = wqe_size / sizeof(struct ib_sge);
        qp->sq.max_inline = wqe_size;
        wqe_size += sizeof(struct rxe_send_wqe);
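        /* A worked example of the sizing above (a sketch, assuming the
         * usual 16-byte struct ib_sge): inline data reuses the space that
         * would otherwise hold the SGE array, so both limits are derived
         * from whichever request is larger. Asking for max_send_sge = 4
         * and max_inline_data = 0 sizes the payload at 64 bytes, and the
         * QP then reports both 4 SGEs and 64 bytes of inline data.
         */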

        qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size,
                                      QUEUE_TYPE_FROM_CLIENT);
        if (!qp->sq.queue) {
                rxe_err_qp(qp, "Unable to allocate send queue\n");
                err = -ENOMEM;
                goto err_out;
        }

        /* prepare info for caller to mmap send queue if user space qp */
        err = do_mmap_info(rxe, uresp ? &uresp->sq_mi : NULL, udata,
                           qp->sq.queue->buf, qp->sq.queue->buf_size,
                           &qp->sq.queue->ip);
        if (err) {
                rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
                goto err_free;
        }

        /* return actual capabilities to caller which may be larger
         * than requested
         */
        init->cap.max_send_wr = qp->sq.max_wr;
        init->cap.max_send_sge = qp->sq.max_sge;
        init->cap.max_inline_data = qp->sq.max_inline;

        return 0;

err_free:
        vfree(qp->sq.queue->buf);
        kfree(qp->sq.queue);
        qp->sq.queue = NULL;
err_out:
        return err;
}

static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
                           struct ib_qp_init_attr *init, struct ib_udata *udata,
                           struct rxe_create_qp_resp __user *uresp)
{
        int err;

        /* if we don't finish qp create make sure queue is valid */
        skb_queue_head_init(&qp->req_pkts);

        err = sock_create_kern(&init_net, AF_INET, SOCK_DGRAM, 0, &qp->sk);
        if (err < 0)
                return err;
        qp->sk->sk->sk_user_data = (void *)(uintptr_t)qp->elem.index;
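        /* Note (an inference from the cast above, not stated in the code):
         * sk_user_data holds the QP's pool index rather than a pointer, so
         * the receive path must look the QP up in the pool and take its own
         * reference, instead of trusting a pointer that may have gone stale
         * if the QP was destroyed while packets were still in flight.
         */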

        /* pick a source UDP port number for this QP based on
         * the source QPN. this spreads traffic for different QPs
         * across different NIC RX queues (while using a single
         * flow for a given QP to maintain packet order).
         * the port number must be in the Dynamic Ports range
         * (0xc000 - 0xffff).
         */
        qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff);
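        /* For example (assuming RXE_ROCE_V2_SPORT is 0xc000): hash_32(x, 14)
         * returns a 14-bit value in [0, 0x3fff], so src_port always lands in
         * 0xc000 - 0xffff; the explicit mask is purely defensive.
         */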

        err = rxe_init_sq(qp, init, udata, uresp);
        if (err)
                return err;

        qp->req.wqe_index = queue_get_producer(qp->sq.queue,
                                               QUEUE_TYPE_FROM_CLIENT);

        qp->req.opcode          = -1;
        qp->comp.opcode         = -1;

        rxe_init_task(&qp->send_task, qp, rxe_sender);

        qp->qp_timeout_jiffies = 0; /* Can't be set for UD/UC in modify_qp */
        if (init->qp_type == IB_QPT_RC) {
                timer_setup(&qp->rnr_nak_timer, rnr_nak_timer, 0);
                timer_setup(&qp->retrans_timer, retransmit_timer, 0);
        }
        return 0;
}

static int rxe_init_rq(struct rxe_qp *qp, struct ib_qp_init_attr *init,
                       struct ib_udata *udata,
                       struct rxe_create_qp_resp __user *uresp)
{
        struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
        int wqe_size;
        int err;

        qp->rq.max_wr = init->cap.max_recv_wr;
        qp->rq.max_sge = init->cap.max_recv_sge;
        wqe_size = sizeof(struct rxe_recv_wqe) +
                                qp->rq.max_sge * sizeof(struct ib_sge);
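        /* Each recv WQE embeds its SGE array, so the element size is fixed
         * at queue creation; e.g. max_recv_sge = 4 adds 4 * 16 = 64 bytes
         * to every element, assuming the usual 16-byte struct ib_sge.
         */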

        qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size,
                                      QUEUE_TYPE_FROM_CLIENT);
        if (!qp->rq.queue) {
                rxe_err_qp(qp, "Unable to allocate recv queue\n");
                err = -ENOMEM;
                goto err_out;
        }

        /* prepare info for caller to mmap recv queue if user space qp */
        err = do_mmap_info(rxe, uresp ? &uresp->rq_mi : NULL, udata,
                           qp->rq.queue->buf, qp->rq.queue->buf_size,
                           &qp->rq.queue->ip);
        if (err) {
                rxe_err_qp(qp, "do_mmap_info failed, err = %d\n", err);
                goto err_free;
        }

        /* return actual capabilities to caller which may be larger
         * than requested
         */
        init->cap.max_recv_wr = qp->rq.max_wr;

        return 0;

err_free:
        vfree(qp->rq.queue->buf);
        kfree(qp->rq.queue);
        qp->rq.queue = NULL;
err_out:
        return err;
}

static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
                            struct ib_qp_init_attr *init,
                            struct ib_udata *udata,
                            struct rxe_create_qp_resp __user *uresp)
{
        int err;

        /* if we don't finish qp create make sure queue is valid */
        skb_queue_head_init(&qp->resp_pkts);

        if (!qp->srq) {
                err = rxe_init_rq(qp, init, udata, uresp);
                if (err)
                        return err;
        }

        rxe_init_task(&qp->recv_task, qp, rxe_receiver);

        qp->resp.opcode         = OPCODE_NONE;
        qp->resp.msn            = 0;

        return 0;
}

/* called by the create qp verb */
int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
                     struct ib_qp_init_attr *init,
                     struct rxe_create_qp_resp __user *uresp,
                     struct ib_pd *ibpd,
                     struct ib_udata *udata)
{
        int err;
        struct rxe_cq *rcq = to_rcq(init->recv_cq);
        struct rxe_cq *scq = to_rcq(init->send_cq);
        struct rxe_srq *srq = init->srq ? to_rsrq(init->srq) : NULL;
        unsigned long flags;

        rxe_get(pd);
        rxe_get(rcq);
        rxe_get(scq);
        if (srq)
                rxe_get(srq);

        qp->pd = pd;
        qp->rcq = rcq;
        qp->scq = scq;
        qp->srq = srq;

        atomic_inc(&rcq->num_wq);
        atomic_inc(&scq->num_wq);

        rxe_qp_init_misc(rxe, qp, init);

        err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
        if (err)
                goto err1;

        err = rxe_qp_init_resp(rxe, qp, init, udata, uresp);
        if (err)
                goto err2;

        spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.qp_state = IB_QPS_RESET;
        qp->valid = 1;
        spin_unlock_irqrestore(&qp->state_lock, flags);

        return 0;

err2:
        rxe_queue_cleanup(qp->sq.queue);
        qp->sq.queue = NULL;
err1:
        atomic_dec(&rcq->num_wq);
        atomic_dec(&scq->num_wq);

        qp->pd = NULL;
        qp->rcq = NULL;
        qp->scq = NULL;
        qp->srq = NULL;

        if (srq)
                rxe_put(srq);
        rxe_put(scq);
        rxe_put(rcq);
        rxe_put(pd);

        return err;
}

/* called by the query qp verb */
int rxe_qp_to_init(struct rxe_qp *qp, struct ib_qp_init_attr *init)
{
        init->event_handler             = qp->ibqp.event_handler;
        init->qp_context                = qp->ibqp.qp_context;
        init->send_cq                   = qp->ibqp.send_cq;
        init->recv_cq                   = qp->ibqp.recv_cq;
        init->srq                       = qp->ibqp.srq;

        init->cap.max_send_wr           = qp->sq.max_wr;
        init->cap.max_send_sge          = qp->sq.max_sge;
        init->cap.max_inline_data       = qp->sq.max_inline;

        if (!qp->srq) {
                init->cap.max_recv_wr           = qp->rq.max_wr;
                init->cap.max_recv_sge          = qp->rq.max_sge;
        }

        init->sq_sig_type               = qp->sq_sig_type;

        init->qp_type                   = qp->ibqp.qp_type;
        init->port_num                  = 1;

        return 0;
}

int rxe_qp_chk_attr(struct rxe_dev *rxe, struct rxe_qp *qp,
                    struct ib_qp_attr *attr, int mask)
{
        if (mask & IB_QP_PORT) {
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->port_num)) {
                        rxe_dbg_qp(qp, "invalid port %d\n", attr->port_num);
                        goto err1;
                }
        }

        if (mask & IB_QP_CAP && rxe_qp_chk_cap(rxe, &attr->cap, !!qp->srq))
                goto err1;

        if (mask & IB_QP_ACCESS_FLAGS) {
                if (!(qp_type(qp) == IB_QPT_RC || qp_type(qp) == IB_QPT_UC))
                        goto err1;
                if (attr->qp_access_flags & ~RXE_ACCESS_SUPPORTED_QP)
                        goto err1;
        }

        if (mask & IB_QP_AV && rxe_av_chk_attr(qp, &attr->ah_attr))
                goto err1;

        if (mask & IB_QP_ALT_PATH) {
                if (rxe_av_chk_attr(qp, &attr->alt_ah_attr))
                        goto err1;
                if (!rdma_is_port_valid(&rxe->ib_dev, attr->alt_port_num)) {
                        rxe_dbg_qp(qp, "invalid alt port %d\n", attr->alt_port_num);
                        goto err1;
                }
                if (attr->alt_timeout > 31) {
                        rxe_dbg_qp(qp, "invalid alt timeout %d > 31\n",
                                   attr->alt_timeout);
                        goto err1;
                }
        }

        if (mask & IB_QP_PATH_MTU) {
                struct rxe_port *port = &rxe->port;

                enum ib_mtu max_mtu = port->attr.max_mtu;
                enum ib_mtu mtu = attr->path_mtu;

                if (mtu > max_mtu) {
                        rxe_dbg_qp(qp, "invalid mtu (%d) > (%d)\n",
                                   ib_mtu_enum_to_int(mtu),
                                   ib_mtu_enum_to_int(max_mtu));
                        goto err1;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                if (attr->max_rd_atomic > rxe->attr.max_qp_rd_atom) {
                        rxe_dbg_qp(qp, "invalid max_rd_atomic %d > %d\n",
                                   attr->max_rd_atomic,
                                   rxe->attr.max_qp_rd_atom);
                        goto err1;
                }
        }

        if (mask & IB_QP_TIMEOUT) {
                if (attr->timeout > 31) {
                        rxe_dbg_qp(qp, "invalid timeout %d > 31\n",
                                   attr->timeout);
                        goto err1;
                }
        }

        return 0;

err1:
        return -EINVAL;
}

/* move the qp to the reset state */
static void rxe_qp_reset(struct rxe_qp *qp)
{
        /* stop tasks from running */
        rxe_disable_task(&qp->recv_task);
        rxe_disable_task(&qp->send_task);

        /* drain work and packet queues */
        rxe_sender(qp);
        rxe_receiver(qp);

        if (qp->rq.queue)
                rxe_queue_reset(qp->rq.queue);
        if (qp->sq.queue)
                rxe_queue_reset(qp->sq.queue);

        /* cleanup attributes */
        atomic_set(&qp->ssn, 0);
        qp->req.opcode = -1;
        qp->req.need_retry = 0;
        qp->req.wait_for_rnr_timer = 0;
        qp->req.noack_pkts = 0;
        qp->resp.msn = 0;
        qp->resp.opcode = -1;
        qp->resp.drop_msg = 0;
        qp->resp.goto_error = 0;
        qp->resp.sent_psn_nak = 0;

        if (qp->resp.mr) {
                rxe_put(qp->resp.mr);
                qp->resp.mr = NULL;
        }

        cleanup_rd_atomic_resources(qp);

        /* reenable tasks */
        rxe_enable_task(&qp->recv_task);
        rxe_enable_task(&qp->send_task);
}

/* move the qp to the error state */
void rxe_qp_error(struct rxe_qp *qp)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.qp_state = IB_QPS_ERR;

        /* drain work and packet queues */
        rxe_sched_task(&qp->recv_task);
        rxe_sched_task(&qp->send_task);
        spin_unlock_irqrestore(&qp->state_lock, flags);
}

static void rxe_qp_sqd(struct rxe_qp *qp, struct ib_qp_attr *attr,
                       int mask)
{
        unsigned long flags;

        spin_lock_irqsave(&qp->state_lock, flags);
        qp->attr.sq_draining = 1;
        rxe_sched_task(&qp->send_task);
        spin_unlock_irqrestore(&qp->state_lock, flags);
}

/* caller should hold qp->state_lock */
static int __qp_chk_state(struct rxe_qp *qp, struct ib_qp_attr *attr,
                            int mask)
{
        enum ib_qp_state cur_state;
        enum ib_qp_state new_state;

        cur_state = (mask & IB_QP_CUR_STATE) ?
                                attr->cur_qp_state : qp->attr.qp_state;
        new_state = (mask & IB_QP_STATE) ?
                                attr->qp_state : cur_state;

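        /* ib_modify_qp_is_ok() enforces the IBA QP state machine, e.g. the
         * usual RESET -> INIT -> RTR -> RTS progression, together with the
         * attribute mask each transition requires.
         */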
        if (!ib_modify_qp_is_ok(cur_state, new_state, qp_type(qp), mask))
                return -EINVAL;

        if (mask & IB_QP_STATE && cur_state == IB_QPS_SQD) {
                if (qp->attr.sq_draining && new_state != IB_QPS_ERR)
                        return -EINVAL;
        }

        return 0;
}

static const char *const qps2str[] = {
        [IB_QPS_RESET]  = "RESET",
        [IB_QPS_INIT]   = "INIT",
        [IB_QPS_RTR]    = "RTR",
        [IB_QPS_RTS]    = "RTS",
        [IB_QPS_SQD]    = "SQD",
        [IB_QPS_SQE]    = "SQE",
        [IB_QPS_ERR]    = "ERR",
};

/* called by the modify qp verb */
int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
                     struct ib_udata *udata)
{
        int err;

        if (mask & IB_QP_CUR_STATE)
                qp->attr.cur_qp_state = attr->qp_state;

        if (mask & IB_QP_STATE) {
                unsigned long flags;

                spin_lock_irqsave(&qp->state_lock, flags);
                err = __qp_chk_state(qp, attr, mask);
                if (!err) {
                        qp->attr.qp_state = attr->qp_state;
                        rxe_dbg_qp(qp, "state -> %s\n",
                                        qps2str[attr->qp_state]);
                }
                spin_unlock_irqrestore(&qp->state_lock, flags);

                if (err)
                        return err;

                switch (attr->qp_state) {
                case IB_QPS_RESET:
                        rxe_qp_reset(qp);
                        break;
                case IB_QPS_SQD:
                        rxe_qp_sqd(qp, attr, mask);
                        break;
                case IB_QPS_ERR:
                        rxe_qp_error(qp);
                        break;
                default:
                        break;
                }
        }

        if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
                int max_rd_atomic = attr->max_rd_atomic ?
                        roundup_pow_of_two(attr->max_rd_atomic) : 0;
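                /* e.g. a request for 5 outstanding reads/atomics rounds up
                 * to 8, while 0 leaves the requester with no read/atomic
                 * credits at all.
                 */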

                qp->attr.max_rd_atomic = max_rd_atomic;
                atomic_set(&qp->req.rd_atomic, max_rd_atomic);
        }

        if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
                int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
                        roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;

                qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;

                free_rd_atomic_resources(qp);

                err = alloc_rd_atomic_resources(qp, max_dest_rd_atomic);
                if (err)
                        return err;
        }

        if (mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
                qp->attr.en_sqd_async_notify = attr->en_sqd_async_notify;

        if (mask & IB_QP_ACCESS_FLAGS)
                qp->attr.qp_access_flags = attr->qp_access_flags;

        if (mask & IB_QP_PKEY_INDEX)
                qp->attr.pkey_index = attr->pkey_index;

        if (mask & IB_QP_PORT)
                qp->attr.port_num = attr->port_num;

        if (mask & IB_QP_QKEY)
                qp->attr.qkey = attr->qkey;

        if (mask & IB_QP_AV)
                rxe_init_av(&attr->ah_attr, &qp->pri_av);

        if (mask & IB_QP_ALT_PATH) {
                rxe_init_av(&attr->alt_ah_attr, &qp->alt_av);
                qp->attr.alt_port_num = attr->alt_port_num;
                qp->attr.alt_pkey_index = attr->alt_pkey_index;
                qp->attr.alt_timeout = attr->alt_timeout;
        }

        if (mask & IB_QP_PATH_MTU) {
                qp->attr.path_mtu = attr->path_mtu;
                qp->mtu = ib_mtu_enum_to_int(attr->path_mtu);
        }

        if (mask & IB_QP_TIMEOUT) {
                qp->attr.timeout = attr->timeout;
                if (attr->timeout == 0) {
                        qp->qp_timeout_jiffies = 0;
                } else {
                        /* According to the spec, timeout = 4.096 * 2 ^ attr->timeout [us] */
                        int j = nsecs_to_jiffies(4096ULL << attr->timeout);

                        qp->qp_timeout_jiffies = j ? j : 1;
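                        /* e.g. attr->timeout = 14 gives 4096ns << 14, about
                         * 67 ms; small values can round down to 0 jiffies,
                         * so clamp to 1 to keep the retransmit timer armed.
                         */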
                }
        }

        if (mask & IB_QP_RETRY_CNT) {
                qp->attr.retry_cnt = attr->retry_cnt;
                qp->comp.retry_cnt = attr->retry_cnt;
                rxe_dbg_qp(qp, "set retry count = %d\n", attr->retry_cnt);
        }

        if (mask & IB_QP_RNR_RETRY) {
                qp->attr.rnr_retry = attr->rnr_retry;
                qp->comp.rnr_retry = attr->rnr_retry;
                rxe_dbg_qp(qp, "set rnr retry count = %d\n", attr->rnr_retry);
        }

        if (mask & IB_QP_RQ_PSN) {
                qp->attr.rq_psn = (attr->rq_psn & BTH_PSN_MASK);
                qp->resp.psn = qp->attr.rq_psn;
                rxe_dbg_qp(qp, "set resp psn = 0x%x\n", qp->resp.psn);
        }

        if (mask & IB_QP_MIN_RNR_TIMER) {
                qp->attr.min_rnr_timer = attr->min_rnr_timer;
                rxe_dbg_qp(qp, "set min rnr timer = 0x%x\n",
                           attr->min_rnr_timer);
        }

        if (mask & IB_QP_SQ_PSN) {
                qp->attr.sq_psn = (attr->sq_psn & BTH_PSN_MASK);
                qp->req.psn = qp->attr.sq_psn;
                qp->comp.psn = qp->attr.sq_psn;
                rxe_dbg_qp(qp, "set req psn = 0x%x\n", qp->req.psn);
        }

        if (mask & IB_QP_PATH_MIG_STATE)
                qp->attr.path_mig_state = attr->path_mig_state;

        if (mask & IB_QP_DEST_QPN)
                qp->attr.dest_qp_num = attr->dest_qp_num;

        return 0;
}

/* called by the query qp verb */
int rxe_qp_to_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask)
{
        unsigned long flags;

        *attr = qp->attr;

        attr->rq_psn                            = qp->resp.psn;
        attr->sq_psn                            = qp->req.psn;

        attr->cap.max_send_wr                   = qp->sq.max_wr;
        attr->cap.max_send_sge                  = qp->sq.max_sge;
        attr->cap.max_inline_data               = qp->sq.max_inline;

        if (!qp->srq) {
                attr->cap.max_recv_wr           = qp->rq.max_wr;
                attr->cap.max_recv_sge          = qp->rq.max_sge;
        }

        rxe_av_to_attr(&qp->pri_av, &attr->ah_attr);
        rxe_av_to_attr(&qp->alt_av, &attr->alt_ah_attr);

        /* Applications that get this state typically spin on it.
         * Yield the processor.
         */
        spin_lock_irqsave(&qp->state_lock, flags);
        attr->cur_qp_state = qp_state(qp);
        if (qp->attr.sq_draining) {
                spin_unlock_irqrestore(&qp->state_lock, flags);
                cond_resched();
        } else {
                spin_unlock_irqrestore(&qp->state_lock, flags);
        }

        return 0;
}

int rxe_qp_chk_destroy(struct rxe_qp *qp)
{
        /* See IBA o10-2.2.3
         * An attempt to destroy a QP while attached to a mcast group
         * will fail immediately.
         */
        if (atomic_read(&qp->mcg_num)) {
                rxe_dbg_qp(qp, "Attempt to destroy while attached to multicast group\n");
                return -EBUSY;
        }

        return 0;
}

/* called when the last reference to the qp is dropped */
static void rxe_qp_do_cleanup(struct work_struct *work)
{
        struct rxe_qp *qp = container_of(work, typeof(*qp), cleanup_work.work);
        unsigned long flags;

        spin_lock_irqsave(&qp->state_lock, flags);
        qp->valid = 0;
        spin_unlock_irqrestore(&qp->state_lock, flags);
        qp->qp_timeout_jiffies = 0;

        if (qp_type(qp) == IB_QPT_RC) {
                del_timer_sync(&qp->retrans_timer);
                del_timer_sync(&qp->rnr_nak_timer);
        }

        if (qp->recv_task.func)
                rxe_cleanup_task(&qp->recv_task);

        if (qp->send_task.func)
                rxe_cleanup_task(&qp->send_task);

        /* flush out any receive wr's or pending requests */
        rxe_sender(qp);
        rxe_receiver(qp);

        if (qp->sq.queue)
                rxe_queue_cleanup(qp->sq.queue);

        if (qp->srq)
                rxe_put(qp->srq);

        if (qp->rq.queue)
                rxe_queue_cleanup(qp->rq.queue);

        if (qp->scq) {
                atomic_dec(&qp->scq->num_wq);
                rxe_put(qp->scq);
        }

        if (qp->rcq) {
                atomic_dec(&qp->rcq->num_wq);
                rxe_put(qp->rcq);
        }

        if (qp->pd)
                rxe_put(qp->pd);

        if (qp->resp.mr)
                rxe_put(qp->resp.mr);

        free_rd_atomic_resources(qp);

        if (qp->sk) {
                if (qp_type(qp) == IB_QPT_RC)
                        sk_dst_reset(qp->sk->sk);

                kernel_sock_shutdown(qp->sk, SHUT_RDWR);
                sock_release(qp->sk);
        }
}

/* called when the last reference to the qp is dropped */
void rxe_qp_cleanup(struct rxe_pool_elem *elem)
{
        struct rxe_qp *qp = container_of(elem, typeof(*qp), elem);

        /* cleanup sleeps (e.g. in sock_release()), so if the final
         * reference was dropped in atomic context it must run from a work
         * queue instead; execute_in_process_context() handles both cases.
         */
        execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work);
}