// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"

enum resp_states {
	RESPST_NONE,
	RESPST_GET_REQ,
	RESPST_CHK_PSN,
	RESPST_CHK_OP_SEQ,
	RESPST_CHK_OP_VALID,
	RESPST_CHK_RESOURCE,
	RESPST_CHK_LENGTH,
	RESPST_CHK_RKEY,
	RESPST_EXECUTE,
	RESPST_READ_REPLY,
	RESPST_COMPLETE,
	RESPST_ACKNOWLEDGE,
	RESPST_CLEANUP,
	RESPST_DUPLICATE_REQUEST,
	RESPST_ERR_MALFORMED_WQE,
	RESPST_ERR_UNSUPPORTED_OPCODE,
	RESPST_ERR_MISALIGNED_ATOMIC,
	RESPST_ERR_PSN_OUT_OF_SEQ,
	RESPST_ERR_MISSING_OPCODE_FIRST,
	RESPST_ERR_MISSING_OPCODE_LAST_C,
	RESPST_ERR_MISSING_OPCODE_LAST_D1E,
	RESPST_ERR_TOO_MANY_RDMA_ATM_REQ,
	RESPST_ERR_RNR,
	RESPST_ERR_RKEY_VIOLATION,
	RESPST_ERR_LENGTH,
	RESPST_ERR_CQ_OVERFLOW,
	RESPST_ERROR,
	RESPST_RESET,
	RESPST_DONE,
	RESPST_EXIT,
};
static char *resp_state_name[] = {
	[RESPST_NONE]				= "NONE",
	[RESPST_GET_REQ]			= "GET_REQ",
	[RESPST_CHK_PSN]			= "CHK_PSN",
	[RESPST_CHK_OP_SEQ]			= "CHK_OP_SEQ",
	[RESPST_CHK_OP_VALID]			= "CHK_OP_VALID",
	[RESPST_CHK_RESOURCE]			= "CHK_RESOURCE",
	[RESPST_CHK_LENGTH]			= "CHK_LENGTH",
	[RESPST_CHK_RKEY]			= "CHK_RKEY",
	[RESPST_EXECUTE]			= "EXECUTE",
	[RESPST_READ_REPLY]			= "READ_REPLY",
	[RESPST_COMPLETE]			= "COMPLETE",
	[RESPST_ACKNOWLEDGE]			= "ACKNOWLEDGE",
	[RESPST_CLEANUP]			= "CLEANUP",
	[RESPST_DUPLICATE_REQUEST]		= "DUPLICATE_REQUEST",
	[RESPST_ERR_MALFORMED_WQE]		= "ERR_MALFORMED_WQE",
	[RESPST_ERR_UNSUPPORTED_OPCODE]		= "ERR_UNSUPPORTED_OPCODE",
	[RESPST_ERR_MISALIGNED_ATOMIC]		= "ERR_MISALIGNED_ATOMIC",
	[RESPST_ERR_PSN_OUT_OF_SEQ]		= "ERR_PSN_OUT_OF_SEQ",
	[RESPST_ERR_MISSING_OPCODE_FIRST]	= "ERR_MISSING_OPCODE_FIRST",
	[RESPST_ERR_MISSING_OPCODE_LAST_C]	= "ERR_MISSING_OPCODE_LAST_C",
	[RESPST_ERR_MISSING_OPCODE_LAST_D1E]	= "ERR_MISSING_OPCODE_LAST_D1E",
	[RESPST_ERR_TOO_MANY_RDMA_ATM_REQ]	= "ERR_TOO_MANY_RDMA_ATM_REQ",
	[RESPST_ERR_RNR]			= "ERR_RNR",
	[RESPST_ERR_RKEY_VIOLATION]		= "ERR_RKEY_VIOLATION",
	[RESPST_ERR_LENGTH]			= "ERR_LENGTH",
	[RESPST_ERR_CQ_OVERFLOW]		= "ERR_CQ_OVERFLOW",
	[RESPST_ERROR]				= "ERROR",
	[RESPST_RESET]				= "RESET",
	[RESPST_DONE]				= "DONE",
	[RESPST_EXIT]				= "EXIT",
};

/* rxe_recv calls here to add a request packet to the input queue */
void rxe_resp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
{
	int must_sched;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	skb_queue_tail(&qp->req_pkts, skb);

	must_sched = (pkt->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST) ||
			(skb_queue_len(&qp->req_pkts) > 1);

	rxe_run_task(&qp->resp.task, must_sched);
}
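
/* Peek the next request packet from the input queue. In the ERROR state the
 * queue is drained and we go on to flush the receive queue; if a read reply
 * is still in progress (qp->resp.res is set) it is resumed before any PSN
 * checking.
 */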
static inline enum resp_states get_req(struct rxe_qp *qp,
				       struct rxe_pkt_info **pkt_p)
{
	struct sk_buff *skb;

	if (qp->resp.state == QP_STATE_ERROR) {
		while ((skb = skb_dequeue(&qp->req_pkts))) {
			rxe_drop_ref(qp);
			kfree_skb(skb);
		}

		/* go drain recv wr queue */
		return RESPST_CHK_RESOURCE;
	}

	skb = skb_peek(&qp->req_pkts);
	if (!skb)
		return RESPST_EXIT;

	*pkt_p = SKB_TO_PKT(skb);

	return (qp->resp.res) ? RESPST_READ_REPLY : RESPST_CHK_PSN;
}
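
/* Compare the packet PSN against the expected PSN: for RC, packets ahead of
 * sequence trigger a NAK and packets behind are handled as duplicates; UC
 * simply drops the message until the next first/only packet arrives.
 */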
static enum resp_states check_psn(struct rxe_qp *qp,
				  struct rxe_pkt_info *pkt)
{
	int diff = psn_compare(pkt->psn, qp->resp.psn);
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (diff > 0) {
			if (qp->resp.sent_psn_nak)
				return RESPST_CLEANUP;

			qp->resp.sent_psn_nak = 1;
			rxe_counter_inc(rxe, RXE_CNT_OUT_OF_SEQ_REQ);
			return RESPST_ERR_PSN_OUT_OF_SEQ;

		} else if (diff < 0) {
			rxe_counter_inc(rxe, RXE_CNT_DUP_REQ);
			return RESPST_DUPLICATE_REQUEST;
		}

		if (qp->resp.sent_psn_nak)
			qp->resp.sent_psn_nak = 0;

		break;

	case IB_QPT_UC:
		if (qp->resp.drop_msg || diff != 0) {
			if (pkt->mask & RXE_START_MASK) {
				qp->resp.drop_msg = 0;
				return RESPST_CHK_OP_SEQ;
			}

			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}
		break;
	default:
		break;
	}

	return RESPST_CHK_OP_SEQ;
}
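
/* Verify that the opcode follows correctly from the previous one, e.g. a
 * MIDDLE or LAST packet must continue a message that was started by FIRST.
 */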
static enum resp_states check_op_seq(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_RC_SEND_FIRST:
		case IB_OPCODE_RC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		case IB_OPCODE_RC_RDMA_WRITE_FIRST:
		case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_C;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_RC_SEND_MIDDLE:
			case IB_OPCODE_RC_SEND_LAST:
			case IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE:
			case IB_OPCODE_RC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_RC_RDMA_WRITE_LAST:
			case IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_ERR_MISSING_OPCODE_FIRST;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	case IB_QPT_UC:
		switch (qp->resp.opcode) {
		case IB_OPCODE_UC_SEND_FIRST:
		case IB_OPCODE_UC_SEND_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		case IB_OPCODE_UC_RDMA_WRITE_FIRST:
		case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				return RESPST_CHK_OP_VALID;
			default:
				return RESPST_ERR_MISSING_OPCODE_LAST_D1E;
			}

		default:
			switch (pkt->opcode) {
			case IB_OPCODE_UC_SEND_MIDDLE:
			case IB_OPCODE_UC_SEND_LAST:
			case IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE:
			case IB_OPCODE_UC_RDMA_WRITE_MIDDLE:
			case IB_OPCODE_UC_RDMA_WRITE_LAST:
			case IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE:
				qp->resp.drop_msg = 1;
				return RESPST_CLEANUP;
			default:
				return RESPST_CHK_OP_VALID;
			}
		}
		break;

	default:
		return RESPST_CHK_OP_VALID;
	}
}
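
/* Check that the QP's remote access flags permit the requested operation. */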
static enum resp_states check_op_valid(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (((pkt->mask & RXE_READ_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_READ)) ||
		    ((pkt->mask & RXE_WRITE_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) ||
		    ((pkt->mask & RXE_ATOMIC_MASK) &&
		     !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) {
			return RESPST_ERR_UNSUPPORTED_OPCODE;
		}

		break;

	case IB_QPT_UC:
		if ((pkt->mask & RXE_WRITE_MASK) &&
		    !(qp->attr.qp_access_flags & IB_ACCESS_REMOTE_WRITE)) {
			qp->resp.drop_msg = 1;
			return RESPST_CLEANUP;
		}

		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}

	return RESPST_CHK_RESOURCE;
}
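
/* Pull a receive WQE off the shared receive queue, firing the SRQ limit
 * event if the queue has drained below the armed limit.
 */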
static enum resp_states get_srq_wqe(struct rxe_qp *qp)
{
	struct rxe_srq *srq = qp->srq;
	struct rxe_queue *q = srq->rq.queue;
	struct rxe_recv_wqe *wqe;
	struct ib_event ev;

	if (srq->error)
		return RESPST_ERR_RNR;

	spin_lock_bh(&srq->rq.consumer_lock);

	wqe = queue_head(q);
	if (!wqe) {
		spin_unlock_bh(&srq->rq.consumer_lock);
		return RESPST_ERR_RNR;
	}

	/* note kernel and user space recv wqes have same size */
	memcpy(&qp->resp.srq_wqe, wqe, sizeof(qp->resp.srq_wqe));

	qp->resp.wqe = &qp->resp.srq_wqe.wqe;
	advance_consumer(q);

	if (srq->limit && srq->ibsrq.event_handler &&
	    (queue_count(q) < srq->limit)) {
		srq->limit = 0;
		goto event;
	}

	spin_unlock_bh(&srq->rq.consumer_lock);
	return RESPST_CHK_LENGTH;

event:
	spin_unlock_bh(&srq->rq.consumer_lock);
	ev.device = qp->ibqp.device;
	ev.element.srq = qp->ibqp.srq;
	ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
	srq->ibsrq.event_handler(&ev, srq->ibsrq.srq_context);
	return RESPST_CHK_LENGTH;
}
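
/* Make sure a receive WQE (or a responder read/atomic resource) is
 * available for this request; if the QP is in error, flush completions
 * for any remaining receive WQEs instead.
 */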
static enum resp_states check_resource(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	struct rxe_srq *srq = qp->srq;

	if (qp->resp.state == QP_STATE_ERROR) {
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_WR_FLUSH_ERR;
			return RESPST_COMPLETE;
		} else if (!srq) {
			qp->resp.wqe = queue_head(qp->rq.queue);
			if (qp->resp.wqe) {
				qp->resp.status = IB_WC_WR_FLUSH_ERR;
				return RESPST_COMPLETE;
			} else {
				return RESPST_EXIT;
			}
		} else {
			return RESPST_EXIT;
		}
	}

	if (pkt->mask & RXE_READ_OR_ATOMIC) {
		/* it is the requester's job to not send
		 * too many read/atomic ops, we just
		 * recycle the responder resource queue
		 */
		if (likely(qp->attr.max_dest_rd_atomic > 0))
			return RESPST_CHK_LENGTH;
		else
			return RESPST_ERR_TOO_MANY_RDMA_ATM_REQ;
	}

	if (pkt->mask & RXE_RWR_MASK) {
		if (srq)
			return get_srq_wqe(qp);

		qp->resp.wqe = queue_head(qp->rq.queue);
		return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
	}

	return RESPST_CHK_LENGTH;
}
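
/* Length checking is effectively a placeholder here: every QP type falls
 * straight through to the rkey check, which does the real length
 * validation for writes.
 */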
static enum resp_states check_length(struct rxe_qp *qp,
				     struct rxe_pkt_info *pkt)
{
	switch (qp_type(qp)) {
	case IB_QPT_RC:
		return RESPST_CHK_RKEY;

	case IB_QPT_UC:
		return RESPST_CHK_RKEY;

	default:
		return RESPST_CHK_RKEY;
	}
}
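
/* Validate the RETH/ATMETH rkey, address range and length against the
 * target memory region before any data is moved. On success the MR
 * reference is stashed in qp->resp.mr for the data-copy stages.
 */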
static enum resp_states check_rkey(struct rxe_qp *qp,
				   struct rxe_pkt_info *pkt)
{
	struct rxe_mem *mem = NULL;
	u64 va;
	u32 rkey;
	u32 resid;
	u32 pktlen;
	int mtu = qp->mtu;
	enum resp_states state;
	int access;

	if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
		if (pkt->mask & RXE_RETH_MASK) {
			qp->resp.va = reth_va(pkt);
			qp->resp.rkey = reth_rkey(pkt);
			qp->resp.resid = reth_len(pkt);
			qp->resp.length = reth_len(pkt);
		}
		access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
						     : IB_ACCESS_REMOTE_WRITE;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		qp->resp.va = atmeth_va(pkt);
		qp->resp.rkey = atmeth_rkey(pkt);
		qp->resp.resid = sizeof(u64);
		access = IB_ACCESS_REMOTE_ATOMIC;
	} else {
		return RESPST_EXECUTE;
	}

	/* A zero-byte op is not required to set an addr or rkey. */
	if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
	    (pkt->mask & RXE_RETH_MASK) &&
	    reth_len(pkt) == 0) {
		return RESPST_EXECUTE;
	}

	va	= qp->resp.va;
	rkey	= qp->resp.rkey;
	resid	= qp->resp.resid;
	pktlen	= payload_size(pkt);

	mem = lookup_mem(qp->pd, access, rkey, lookup_remote);
	if (!mem) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (unlikely(mem->state == RXE_MEM_STATE_FREE)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (mem_check_range(mem, va, resid)) {
		state = RESPST_ERR_RKEY_VIOLATION;
		goto err;
	}

	if (pkt->mask & RXE_WRITE_MASK) {
		if (resid > mtu) {
			if (pktlen != mtu || bth_pad(pkt)) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		} else {
			if (pktlen != resid) {
				state = RESPST_ERR_LENGTH;
				goto err;
			}
			if ((bth_pad(pkt) != (0x3 & (-resid)))) {
				/* This case may not be exactly that
				 * but nothing else fits.
				 */
				state = RESPST_ERR_LENGTH;
				goto err;
			}
		}
	}

	WARN_ON_ONCE(qp->resp.mr);

	qp->resp.mr = mem;
	return RESPST_EXECUTE;

err:
	if (mem)
		rxe_drop_ref(mem);
	return state;
}
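
/* Copy inbound SEND payload into the receive WQE's scatter/gather list. */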
static enum resp_states send_data_in(struct rxe_qp *qp, void *data_addr,
				     int data_len)
{
	int err;

	err = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE, &qp->resp.wqe->dma,
			data_addr, data_len, to_mem_obj, NULL);
	if (unlikely(err))
		return (err == -ENOSPC) ? RESPST_ERR_LENGTH
					: RESPST_ERR_MALFORMED_WQE;

	return RESPST_NONE;
}
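
/* Copy inbound RDMA WRITE payload directly into the target MR at the
 * current virtual address and advance the write cursor.
 */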
static enum resp_states write_data_in(struct rxe_qp *qp,
				      struct rxe_pkt_info *pkt)
{
	enum resp_states rc = RESPST_NONE;
	int err;
	int data_len = payload_size(pkt);

	err = rxe_mem_copy(qp->resp.mr, qp->resp.va, payload_addr(pkt),
			   data_len, to_mem_obj, NULL);
	if (err) {
		rc = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	qp->resp.va += data_len;
	qp->resp.resid -= data_len;

out:
	return rc;
}
/* Guarantee atomicity of atomic operations at the machine level. */
static DEFINE_SPINLOCK(atomic_ops_lock);

static enum resp_states process_atomic(struct rxe_qp *qp,
				       struct rxe_pkt_info *pkt)
{
	u64 iova = atmeth_va(pkt);
	u64 *vaddr;
	enum resp_states ret;
	struct rxe_mem *mr = qp->resp.mr;

	if (mr->state != RXE_MEM_STATE_VALID) {
		ret = RESPST_ERR_RKEY_VIOLATION;
		goto out;
	}

	vaddr = iova_to_vaddr(mr, iova, sizeof(u64));

	/* check vaddr is 8 bytes aligned. */
	if (!vaddr || (uintptr_t)vaddr & 7) {
		ret = RESPST_ERR_MISALIGNED_ATOMIC;
		goto out;
	}

	spin_lock_bh(&atomic_ops_lock);

	qp->resp.atomic_orig = *vaddr;

	if (pkt->opcode == IB_OPCODE_RC_COMPARE_SWAP ||
	    pkt->opcode == IB_OPCODE_RD_COMPARE_SWAP) {
		if (*vaddr == atmeth_comp(pkt))
			*vaddr = atmeth_swap_add(pkt);
	} else {
		*vaddr += atmeth_swap_add(pkt);
	}

	spin_unlock_bh(&atomic_ops_lock);

	ret = RESPST_NONE;
out:
	return ret;
}
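
/* Build an ACK, atomic ACK or read response packet that mirrors the
 * request's addressing, filling in the BTH/AETH/ATMACK headers as the
 * opcode requires.
 */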
static struct sk_buff *prepare_ack_packet(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt,
					  struct rxe_pkt_info *ack,
					  int opcode,
					  int payload,
					  u32 psn,
					  u8 syndrome,
					  u32 *crcp)
{
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	struct sk_buff *skb;
	u32 crc = 0;
	u32 *p;
	int paylen;
	int pad;
	int err;

	/*
	 * allocate packet
	 */
	pad = (-payload) & 0x3;
	paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;

	skb = rxe_init_packet(rxe, &qp->pri_av, paylen, ack);
	if (!skb)
		return NULL;

	ack->qp = qp;
	ack->opcode = opcode;
	ack->mask = rxe_opcode[opcode].mask;
	ack->offset = pkt->offset;
	ack->paylen = paylen;

	/* fill in bth using the request packet headers */
	memcpy(ack->hdr, pkt->hdr, pkt->offset + RXE_BTH_BYTES);

	bth_set_opcode(ack, opcode);
	bth_set_qpn(ack, qp->attr.dest_qp_num);
	bth_set_pad(ack, pad);
	bth_set_se(ack, 0);
	bth_set_psn(ack, psn);
	bth_set_ack(ack, 0);
	ack->psn = psn;

	if (ack->mask & RXE_AETH_MASK) {
		aeth_set_syn(ack, syndrome);
		aeth_set_msn(ack, qp->resp.msn);
	}

	if (ack->mask & RXE_ATMACK_MASK)
		atmack_set_orig(ack, qp->resp.atomic_orig);

	err = rxe_prepare(ack, skb, &crc);
	if (err) {
		kfree_skb(skb);
		return NULL;
	}

	if (crcp) {
		/* CRC computation will be continued by the caller */
		*crcp = crc;
	} else {
		p = payload_addr(ack) + payload + bth_pad(ack);
		*p = ~crc;
	}

	return skb;
}
/* RDMA read response. If res is not NULL, then we have a current RDMA request
 * being processed or replayed.
 */
static enum resp_states read_reply(struct rxe_qp *qp,
				   struct rxe_pkt_info *req_pkt)
{
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	int mtu = qp->mtu;
	enum resp_states state;
	int payload;
	int opcode;
	int err;
	struct resp_res *res = qp->resp.res;
	u32 icrc;
	u32 *p;

	if (!res) {
		/* This is the first time we process that request. Get a
		 * resource
		 */
		res = &qp->resp.resources[qp->resp.res_head];

		free_rd_atomic_resource(qp, res);
		rxe_advance_resp_resource(qp);

		res->type		= RXE_READ_MASK;
		res->replay		= 0;

		res->read.va		= qp->resp.va;
		res->read.va_org	= qp->resp.va;

		res->first_psn		= req_pkt->psn;

		if (reth_len(req_pkt)) {
			res->last_psn	= (req_pkt->psn +
					   (reth_len(req_pkt) + mtu - 1) /
					   mtu - 1) & BTH_PSN_MASK;
		} else {
			res->last_psn	= res->first_psn;
		}
		res->cur_psn		= req_pkt->psn;

		res->read.resid		= qp->resp.resid;
		res->read.length	= qp->resp.resid;
		res->read.rkey		= qp->resp.rkey;

		/* note res inherits the reference to mr from qp */
		res->read.mr		= qp->resp.mr;
		qp->resp.mr		= NULL;

		qp->resp.res		= res;
		res->state		= rdatm_res_state_new;
	}

	if (res->state == rdatm_res_state_new) {
		if (res->read.resid <= mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST;
	} else {
		if (res->read.resid > mtu)
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE;
		else
			opcode = IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST;
	}

	res->state = rdatm_res_state_next;

	payload = min_t(int, res->read.resid, mtu);

	skb = prepare_ack_packet(qp, req_pkt, &ack_pkt, opcode, payload,
				 res->cur_psn, AETH_ACK_UNLIMITED, &icrc);
	if (!skb)
		return RESPST_ERR_RNR;

	err = rxe_mem_copy(res->read.mr, res->read.va, payload_addr(&ack_pkt),
			   payload, from_mem_obj, &icrc);
	if (err)
		pr_err("Failed copying memory\n");

	if (bth_pad(&ack_pkt)) {
		struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
		u8 *pad = payload_addr(&ack_pkt) + payload;

		memset(pad, 0, bth_pad(&ack_pkt));
		icrc = rxe_crc32(rxe, icrc, pad, bth_pad(&ack_pkt));
	}
	p = payload_addr(&ack_pkt) + payload + bth_pad(&ack_pkt);
	*p = ~icrc;

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err) {
		pr_err("Failed sending RDMA reply.\n");
		return RESPST_ERR_RNR;
	}

	res->read.va += payload;
	res->read.resid -= payload;
	res->cur_psn = (res->cur_psn + 1) & BTH_PSN_MASK;

	if (res->read.resid > 0) {
		state = RESPST_DONE;
	} else {
		qp->resp.res = NULL;
		if (!res->replay)
			qp->resp.opcode = -1;
		if (psn_compare(res->cur_psn, qp->resp.psn) >= 0)
			qp->resp.psn = res->cur_psn;
		state = RESPST_CLEANUP;
	}

	return state;
}
static void build_rdma_network_hdr(union rdma_network_hdr *hdr,
				   struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	memset(hdr, 0, sizeof(*hdr));
	if (skb->protocol == htons(ETH_P_IP))
		memcpy(&hdr->roce4grh, ip_hdr(skb), sizeof(hdr->roce4grh));
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&hdr->ibgrh, ipv6_hdr(skb), sizeof(hdr->ibgrh));
}
/* Executes a new request. A retried request never reaches this function
 * (sends and writes are discarded, and reads and atomics are retried
 * elsewhere).
 */
static enum resp_states execute(struct rxe_qp *qp, struct rxe_pkt_info *pkt)
{
	enum resp_states err;

	if (pkt->mask & RXE_SEND_MASK) {
		if (qp_type(qp) == IB_QPT_UD ||
		    qp_type(qp) == IB_QPT_SMI ||
		    qp_type(qp) == IB_QPT_GSI) {
			union rdma_network_hdr hdr;

			build_rdma_network_hdr(&hdr, pkt);

			err = send_data_in(qp, &hdr, sizeof(hdr));
			if (err)
				return err;
		}
		err = send_data_in(qp, payload_addr(pkt), payload_size(pkt));
		if (err)
			return err;
	} else if (pkt->mask & RXE_WRITE_MASK) {
		err = write_data_in(qp, pkt);
		if (err)
			return err;
	} else if (pkt->mask & RXE_READ_MASK) {
		/* For RDMA Read we can increment the msn now. See C9-148. */
		qp->resp.msn++;
		return RESPST_READ_REPLY;
	} else if (pkt->mask & RXE_ATOMIC_MASK) {
		err = process_atomic(qp, pkt);
		if (err)
			return err;
	} else {
		/* Unreachable */
		WARN_ON_ONCE(1);
	}

	/* next expected psn, read handles this separately */
	qp->resp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
	qp->resp.ack_psn = qp->resp.psn;

	qp->resp.opcode = pkt->opcode;
	qp->resp.status = IB_WC_SUCCESS;

	if (pkt->mask & RXE_COMP_MASK) {
		/* We successfully processed this new request. */
		qp->resp.msn++;
		return RESPST_COMPLETE;
	} else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}
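
/* Post a receive completion for the current WQE and decide whether an
 * explicit acknowledge still needs to be generated for this request.
 */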
static enum resp_states do_complete(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	struct rxe_cqe cqe;
	struct ib_wc *wc = &cqe.ibwc;
	struct ib_uverbs_wc *uwc = &cqe.uibwc;
	struct rxe_recv_wqe *wqe = qp->resp.wqe;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);

	if (!wqe)
		return RESPST_CLEANUP;

	memset(&cqe, 0, sizeof(cqe));

	if (qp->rcq->is_user) {
		uwc->status		= qp->resp.status;
		uwc->qp_num		= qp->ibqp.qp_num;
		uwc->wr_id		= wqe->wr_id;
	} else {
		wc->status		= qp->resp.status;
		wc->qp			= &qp->ibqp;
		wc->wr_id		= wqe->wr_id;
	}

	if (wc->status == IB_WC_SUCCESS) {
		rxe_counter_inc(rxe, RXE_CNT_RDMA_RECV);
		wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
		wc->vendor_err = 0;
		wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
				pkt->mask & RXE_WRITE_MASK) ?
					qp->resp.length : wqe->dma.length - wqe->dma.resid;

		/* fields after byte_len are different between kernel and user
		 * space
		 */
		if (qp->rcq->is_user) {
			uwc->wc_flags = IB_WC_GRH;

			if (pkt->mask & RXE_IMMDT_MASK) {
				uwc->wc_flags |= IB_WC_WITH_IMM;
				uwc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				uwc->wc_flags |= IB_WC_WITH_INVALIDATE;
				uwc->ex.invalidate_rkey = ieth_rkey(pkt);
			}

			uwc->qp_num		= qp->ibqp.qp_num;

			if (pkt->mask & RXE_DETH_MASK)
				uwc->src_qp = deth_sqp(pkt);

			uwc->port_num		= qp->attr.port_num;
		} else {
			struct sk_buff *skb = PKT_TO_SKB(pkt);

			wc->wc_flags = IB_WC_GRH | IB_WC_WITH_NETWORK_HDR_TYPE;
			if (skb->protocol == htons(ETH_P_IP))
				wc->network_hdr_type = RDMA_NETWORK_IPV4;
			else
				wc->network_hdr_type = RDMA_NETWORK_IPV6;

			if (pkt->mask & RXE_IMMDT_MASK) {
				wc->wc_flags |= IB_WC_WITH_IMM;
				wc->ex.imm_data = immdt_imm(pkt);
			}

			if (pkt->mask & RXE_IETH_MASK) {
				struct rxe_mem *rmr;

				wc->wc_flags |= IB_WC_WITH_INVALIDATE;
				wc->ex.invalidate_rkey = ieth_rkey(pkt);

				rmr = rxe_pool_get_index(&rxe->mr_pool,
							 wc->ex.invalidate_rkey >> 8);
				if (unlikely(!rmr)) {
					pr_err("Bad rkey %#x invalidation\n",
					       wc->ex.invalidate_rkey);
					return RESPST_ERROR;
				}
				rmr->state = RXE_MEM_STATE_FREE;
				rxe_drop_ref(rmr);
			}

			wc->qp			= &qp->ibqp;

			if (pkt->mask & RXE_DETH_MASK)
				wc->src_qp = deth_sqp(pkt);

			wc->port_num		= qp->attr.port_num;
		}
	}

	/* have copy for srq and reference for !srq */
	if (!qp->srq)
		advance_consumer(qp->rq.queue);

	qp->resp.wqe = NULL;

	if (rxe_cq_post(qp->rcq, &cqe, pkt ? bth_se(pkt) : 1))
		return RESPST_ERR_CQ_OVERFLOW;

	if (qp->resp.state == QP_STATE_ERROR)
		return RESPST_CHK_RESOURCE;

	if (!pkt)
		return RESPST_DONE;
	else if (qp_type(qp) == IB_QPT_RC)
		return RESPST_ACKNOWLEDGE;
	else
		return RESPST_CLEANUP;
}
static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
		    u8 syndrome, u32 psn)
{
	int err = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt, IB_OPCODE_RC_ACKNOWLEDGE,
				 0, psn, syndrome, NULL);
	if (!skb) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (err)
		pr_err_ratelimited("Failed sending ack\n");

err1:
	return err;
}
static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
			   u8 syndrome)
{
	int rc = 0;
	struct rxe_pkt_info ack_pkt;
	struct sk_buff *skb;
	struct resp_res *res;

	skb = prepare_ack_packet(qp, pkt, &ack_pkt,
				 IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE, 0, pkt->psn,
				 syndrome, NULL);
	if (!skb) {
		rc = -ENOMEM;
		goto out;
	}

	rxe_add_ref(qp);

	res = &qp->resp.resources[qp->resp.res_head];
	free_rd_atomic_resource(qp, res);
	rxe_advance_resp_resource(qp);

	memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt));
	memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0,
	       sizeof(skb->cb) - sizeof(ack_pkt));

	skb_get(skb);
	res->type = RXE_ATOMIC_MASK;
	res->atomic.skb = skb;
	res->first_psn = ack_pkt.psn;
	res->last_psn = ack_pkt.psn;
	res->cur_psn = ack_pkt.psn;

	rc = rxe_xmit_packet(qp, &ack_pkt, skb);
	if (rc) {
		pr_err_ratelimited("Failed sending ack\n");
		rxe_drop_ref(qp);
	}
out:
	return rc;
}
static enum resp_states acknowledge(struct rxe_qp *qp,
				    struct rxe_pkt_info *pkt)
{
	if (qp_type(qp) != IB_QPT_RC)
		return RESPST_CLEANUP;

	if (qp->resp.aeth_syndrome != AETH_ACK_UNLIMITED)
		send_ack(qp, pkt, qp->resp.aeth_syndrome, pkt->psn);
	else if (pkt->mask & RXE_ATOMIC_MASK)
		send_atomic_ack(qp, pkt, AETH_ACK_UNLIMITED);
	else if (bth_ack(pkt))
		send_ack(qp, pkt, AETH_ACK_UNLIMITED, pkt->psn);

	return RESPST_CLEANUP;
}
static enum resp_states cleanup(struct rxe_qp *qp,
				struct rxe_pkt_info *pkt)
{
	struct sk_buff *skb;

	if (pkt) {
		skb = skb_dequeue(&qp->req_pkts);
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (qp->resp.mr) {
		rxe_drop_ref(qp->resp.mr);
		qp->resp.mr = NULL;
	}

	return RESPST_DONE;
}
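
/* Look up the responder resource (read or atomic) whose PSN range covers
 * the given PSN.
 */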
static struct resp_res *find_resource(struct rxe_qp *qp, u32 psn)
{
	int i;

	for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
		struct resp_res *res = &qp->resp.resources[i];

		if (res->type == 0)
			continue;

		if (psn_compare(psn, res->first_psn) >= 0 &&
		    psn_compare(psn, res->last_psn) <= 0) {
			return res;
		}
	}

	return NULL;
}
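
/* Handle a request whose PSN falls behind the expected PSN: sends and
 * writes are simply re-acked, while reads and atomics are replayed from
 * the saved responder resources.
 */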
static enum resp_states duplicate_request(struct rxe_qp *qp,
					  struct rxe_pkt_info *pkt)
{
	enum resp_states rc;
	u32 prev_psn = (qp->resp.ack_psn - 1) & BTH_PSN_MASK;

	if (pkt->mask & RXE_SEND_MASK ||
	    pkt->mask & RXE_WRITE_MASK) {
		/* SEND. Ack again and cleanup. C9-105. */
		if (bth_ack(pkt))
			send_ack(qp, pkt, AETH_ACK_UNLIMITED, prev_psn);
		rc = RESPST_CLEANUP;
		goto out;
	} else if (pkt->mask & RXE_READ_MASK) {
		struct resp_res *res;

		res = find_resource(qp, pkt->psn);
		if (!res) {
			/* Resource not found. Class D error. Drop the
			 * request.
			 */
			rc = RESPST_CLEANUP;
			goto out;
		} else {
			/* Ensure this new request is the same as the previous
			 * one or a subset of it.
			 */
			u64 iova = reth_va(pkt);
			u32 resid = reth_len(pkt);

			if (iova < res->read.va_org ||
			    resid > res->read.length ||
			    (iova + resid) > (res->read.va_org +
					      res->read.length)) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			if (reth_rkey(pkt) != res->read.rkey) {
				rc = RESPST_CLEANUP;
				goto out;
			}

			res->cur_psn = pkt->psn;
			res->state = (pkt->psn == res->first_psn) ?
					rdatm_res_state_new :
					rdatm_res_state_replay;
			res->replay = 1;

			/* Reset the resource, except length. */
			res->read.va_org = iova;
			res->read.va = iova;
			res->read.resid = resid;

			/* Replay the RDMA read reply. */
			qp->resp.res = res;
			rc = RESPST_READ_REPLY;
			goto out;
		}
	} else {
		struct resp_res *res;

		/* Find the operation in our list of responder resources. */
		res = find_resource(qp, pkt->psn);
		if (res) {
			skb_get(res->atomic.skb);
			/* Resend the result. */
			rc = rxe_xmit_packet(qp, pkt, res->atomic.skb);
			if (rc) {
				pr_err("Failed resending result. This flow is not handled - skb ignored\n");
				rc = RESPST_CLEANUP;
				goto out;
			}
		}

		/* Resource not found. Class D error. Drop the request. */
		rc = RESPST_CLEANUP;
		goto out;
	}
out:
	return rc;
}
/* Process a class A or C. Both are treated the same in this implementation. */
static void do_class_ac_error(struct rxe_qp *qp, u8 syndrome,
			      enum ib_wc_status status)
{
	qp->resp.aeth_syndrome	= syndrome;
	qp->resp.status		= status;

	/* indicate that we should go through the ERROR state */
	qp->resp.goto_error	= 1;
}
static enum resp_states do_class_d1e_error(struct rxe_qp *qp)
{
	/* UC */
	if (qp->srq) {
		/* Class E */
		qp->resp.drop_msg = 1;
		if (qp->resp.wqe) {
			qp->resp.status = IB_WC_REM_INV_REQ_ERR;
			return RESPST_COMPLETE;
		} else {
			return RESPST_CLEANUP;
		}
	} else {
		/* Class D1. This packet may be the start of a
		 * new message and could be valid. The previous
		 * message is invalid and ignored. Reset the
		 * recv wr to its original state.
		 */
		if (qp->resp.wqe) {
			qp->resp.wqe->dma.resid = qp->resp.wqe->dma.length;
			qp->resp.wqe->dma.cur_sge = 0;
			qp->resp.wqe->dma.sge_offset = 0;
			qp->resp.opcode = -1;
		}

		if (qp->resp.mr) {
			rxe_drop_ref(qp->resp.mr);
			qp->resp.mr = NULL;
		}

		return RESPST_CLEANUP;
	}
}
static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&qp->req_pkts))) {
		rxe_drop_ref(qp);
		kfree_skb(skb);
	}

	if (notify)
		return;

	while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue))
		advance_consumer(qp->rq.queue);
}
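
/* Top-level responder task: runs the state machine above, starting from
 * GET_REQ (or RESET) and stepping through the CHK_* validation stages,
 * EXECUTE, COMPLETE and ACKNOWLEDGE until the input queue is drained or
 * an error state is reached.
 */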
int rxe_responder(void *arg)
{
	struct rxe_qp *qp = (struct rxe_qp *)arg;
	struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
	enum resp_states state;
	struct rxe_pkt_info *pkt = NULL;
	int ret = 0;

	rxe_add_ref(qp);

	qp->resp.aeth_syndrome = AETH_ACK_UNLIMITED;

	if (!qp->valid) {
		ret = -EINVAL;
		goto done;
	}

	switch (qp->resp.state) {
	case QP_STATE_RESET:
		state = RESPST_RESET;
		break;

	default:
		state = RESPST_GET_REQ;
		break;
	}

	while (1) {
		pr_debug("qp#%d state = %s\n", qp_num(qp),
			 resp_state_name[state]);
		switch (state) {
		case RESPST_GET_REQ:
			state = get_req(qp, &pkt);
			break;
		case RESPST_CHK_PSN:
			state = check_psn(qp, pkt);
			break;
		case RESPST_CHK_OP_SEQ:
			state = check_op_seq(qp, pkt);
			break;
		case RESPST_CHK_OP_VALID:
			state = check_op_valid(qp, pkt);
			break;
		case RESPST_CHK_RESOURCE:
			state = check_resource(qp, pkt);
			break;
		case RESPST_CHK_LENGTH:
			state = check_length(qp, pkt);
			break;
		case RESPST_CHK_RKEY:
			state = check_rkey(qp, pkt);
			break;
		case RESPST_EXECUTE:
			state = execute(qp, pkt);
			break;
		case RESPST_COMPLETE:
			state = do_complete(qp, pkt);
			break;
		case RESPST_READ_REPLY:
			state = read_reply(qp, pkt);
			break;
		case RESPST_ACKNOWLEDGE:
			state = acknowledge(qp, pkt);
			break;
		case RESPST_CLEANUP:
			state = cleanup(qp, pkt);
			break;
		case RESPST_DUPLICATE_REQUEST:
			state = duplicate_request(qp, pkt);
			break;
		case RESPST_ERR_PSN_OUT_OF_SEQ:
			/* RC only - Class B. Drop packet. */
			send_ack(qp, pkt, AETH_NAK_PSN_SEQ_ERROR, qp->resp.psn);
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_TOO_MANY_RDMA_ATM_REQ:
		case RESPST_ERR_MISSING_OPCODE_FIRST:
		case RESPST_ERR_MISSING_OPCODE_LAST_C:
		case RESPST_ERR_UNSUPPORTED_OPCODE:
		case RESPST_ERR_MISALIGNED_ATOMIC:
			/* RC Only - Class C. */
			do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
					  IB_WC_REM_INV_REQ_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_MISSING_OPCODE_LAST_D1E:
			state = do_class_d1e_error(qp);
			break;
		case RESPST_ERR_RNR:
			if (qp_type(qp) == IB_QPT_RC) {
				rxe_counter_inc(rxe, RXE_CNT_SND_RNR);
				/* RC - class B */
				send_ack(qp, pkt, AETH_RNR_NAK |
					 (~AETH_TYPE_MASK &
					 qp->attr.min_rnr_timer),
					 pkt->psn);
			} else {
				/* UD/UC - class D */
				qp->resp.drop_msg = 1;
			}
			state = RESPST_CLEANUP;
			break;

		case RESPST_ERR_RKEY_VIOLATION:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_REM_ACC_ERR,
						  IB_WC_REM_ACCESS_ERR);
				state = RESPST_COMPLETE;
			} else {
				qp->resp.drop_msg = 1;
				if (qp->srq) {
					/* UC/SRQ Class D */
					qp->resp.status = IB_WC_REM_ACCESS_ERR;
					state = RESPST_COMPLETE;
				} else {
					/* UC/non-SRQ Class E. */
					state = RESPST_CLEANUP;
				}
			}
			break;

		case RESPST_ERR_LENGTH:
			if (qp_type(qp) == IB_QPT_RC) {
				/* Class C */
				do_class_ac_error(qp, AETH_NAK_INVALID_REQ,
						  IB_WC_REM_INV_REQ_ERR);
				state = RESPST_COMPLETE;
			} else if (qp->srq) {
				/* UC/UD - class E */
				qp->resp.status = IB_WC_REM_INV_REQ_ERR;
				state = RESPST_COMPLETE;
			} else {
				/* UC/UD - class D */
				qp->resp.drop_msg = 1;
				state = RESPST_CLEANUP;
			}
			break;

		case RESPST_ERR_MALFORMED_WQE:
			/* All, Class A. */
			do_class_ac_error(qp, AETH_NAK_REM_OP_ERR,
					  IB_WC_LOC_QP_OP_ERR);
			state = RESPST_COMPLETE;
			break;

		case RESPST_ERR_CQ_OVERFLOW:
			/* All - Class G */
			state = RESPST_ERROR;
			break;

		case RESPST_DONE:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto done;

		case RESPST_EXIT:
			if (qp->resp.goto_error) {
				state = RESPST_ERROR;
				break;
			}

			goto exit;

		case RESPST_RESET:
			rxe_drain_req_pkts(qp, false);
			qp->resp.wqe = NULL;
			goto exit;

		case RESPST_ERROR:
			qp->resp.goto_error = 0;
			pr_warn("qp#%d moved to error state\n", qp_num(qp));