// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause

/* Authors: Cheng Xu <[email protected]> */
/*          Kai Shen <[email protected]> */
/* Copyright (c) 2020-2022, Alibaba Group. */

#include "erdma_verbs.h"

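/*
 * The CQ ring uses an owner-bit scheme to detect new CQEs: the hardware
 * toggles the owner bit it writes on each pass through the queue, so an
 * entry is valid only when its owner bit differs from the wrap parity of
 * the software consumer index (ci & depth, with depth a power of two).
 */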
static void *get_next_valid_cqe(struct erdma_cq *cq)
{
        __be32 *cqe = get_queue_entry(cq->kern_cq.qbuf, cq->kern_cq.ci,
                                      cq->depth, CQE_SHIFT);
        u32 owner = FIELD_GET(ERDMA_CQE_HDR_OWNER_MASK,
                              be32_to_cpu(READ_ONCE(*cqe)));

        return owner ^ !!(cq->kern_cq.ci & cq->depth) ? cqe : NULL;
}

static void notify_cq(struct erdma_cq *cq, u8 solicited)
{
        u64 db_data =
                FIELD_PREP(ERDMA_CQDB_IDX_MASK, (cq->kern_cq.notify_cnt)) |
                FIELD_PREP(ERDMA_CQDB_CQN_MASK, cq->cqn) |
                FIELD_PREP(ERDMA_CQDB_ARM_MASK, 1) |
                FIELD_PREP(ERDMA_CQDB_SOL_MASK, solicited) |
                FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cq->kern_cq.cmdsn) |
                FIELD_PREP(ERDMA_CQDB_CI_MASK, cq->kern_cq.ci);

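        /*
         * Publish the doorbell value to the host-memory shadow copy first,
         * so the device can recover the latest CQ state, then ring the
         * hardware doorbell with a single 64-bit write.
         */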
        *cq->kern_cq.db_record = db_data;
        writeq(db_data, cq->kern_cq.db);
}

int erdma_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct erdma_cq *cq = to_ecq(ibcq);
        unsigned long irq_flags;
        int ret = 0;

        spin_lock_irqsave(&cq->kern_cq.lock, irq_flags);

        notify_cq(cq, (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);

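        /*
         * With IB_CQ_REPORT_MISSED_EVENTS, return 1 if completions are
         * already pending at arm time so the caller knows to poll again.
         */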
        if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && get_next_valid_cqe(cq))
                ret = 1;

        cq->kern_cq.notify_cnt++;

        spin_unlock_irqrestore(&cq->kern_cq.lock, irq_flags);

        return ret;
}

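/* Translate device opcodes carried in the CQE to IB WC opcodes. */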
static const enum ib_wc_opcode wc_mapping_table[ERDMA_NUM_OPCODES] = {
        [ERDMA_OP_WRITE] = IB_WC_RDMA_WRITE,
        [ERDMA_OP_READ] = IB_WC_RDMA_READ,
        [ERDMA_OP_SEND] = IB_WC_SEND,
        [ERDMA_OP_SEND_WITH_IMM] = IB_WC_SEND,
        [ERDMA_OP_RECEIVE] = IB_WC_RECV,
        [ERDMA_OP_RECV_IMM] = IB_WC_RECV_RDMA_WITH_IMM,
        [ERDMA_OP_RECV_INV] = IB_WC_RECV,
        [ERDMA_OP_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE,
        [ERDMA_OP_RSP_SEND_IMM] = IB_WC_RECV,
        [ERDMA_OP_SEND_WITH_INV] = IB_WC_SEND,
        [ERDMA_OP_REG_MR] = IB_WC_REG_MR,
        [ERDMA_OP_LOCAL_INV] = IB_WC_LOCAL_INV,
        [ERDMA_OP_READ_WITH_INV] = IB_WC_RDMA_READ,
        [ERDMA_OP_ATOMIC_CAS] = IB_WC_COMP_SWAP,
        [ERDMA_OP_ATOMIC_FAA] = IB_WC_FETCH_ADD,
};

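/*
 * Per-syndrome mapping from the device completion status to the IB status
 * reported to consumers, plus a vendor error code for diagnostics.
 */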
static const struct {
        enum erdma_wc_status erdma;
        enum ib_wc_status base;
        enum erdma_vendor_err vendor;
} map_cqe_status[ERDMA_NUM_WC_STATUS] = {
        { ERDMA_WC_SUCCESS, IB_WC_SUCCESS, ERDMA_WC_VENDOR_NO_ERR },
        { ERDMA_WC_GENERAL_ERR, IB_WC_GENERAL_ERR, ERDMA_WC_VENDOR_NO_ERR },
        { ERDMA_WC_RECV_WQE_FORMAT_ERR, IB_WC_GENERAL_ERR,
          ERDMA_WC_VENDOR_INVALID_RQE },
        { ERDMA_WC_RECV_STAG_INVALID_ERR, IB_WC_REM_ACCESS_ERR,
          ERDMA_WC_VENDOR_RQE_INVALID_STAG },
        { ERDMA_WC_RECV_ADDR_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
          ERDMA_WC_VENDOR_RQE_ADDR_VIOLATION },
        { ERDMA_WC_RECV_RIGHT_VIOLATION_ERR, IB_WC_REM_ACCESS_ERR,
          ERDMA_WC_VENDOR_RQE_ACCESS_RIGHT_ERR },
        { ERDMA_WC_RECV_PDID_ERR, IB_WC_REM_ACCESS_ERR,
          ERDMA_WC_VENDOR_RQE_INVALID_PD },
        { ERDMA_WC_RECV_WARRPING_ERR, IB_WC_REM_ACCESS_ERR,
          ERDMA_WC_VENDOR_RQE_WRAP_ERR },
        { ERDMA_WC_SEND_WQE_FORMAT_ERR, IB_WC_LOC_QP_OP_ERR,
          ERDMA_WC_VENDOR_INVALID_SQE },
        { ERDMA_WC_SEND_WQE_ORD_EXCEED, IB_WC_GENERAL_ERR,
          ERDMA_WC_VENDOR_ZERO_ORD },
        { ERDMA_WC_SEND_STAG_INVALID_ERR, IB_WC_LOC_ACCESS_ERR,
          ERDMA_WC_VENDOR_SQE_INVALID_STAG },
        { ERDMA_WC_SEND_ADDR_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
          ERDMA_WC_VENDOR_SQE_ADDR_VIOLATION },
        { ERDMA_WC_SEND_RIGHT_VIOLATION_ERR, IB_WC_LOC_ACCESS_ERR,
          ERDMA_WC_VENDOR_SQE_ACCESS_ERR },
        { ERDMA_WC_SEND_PDID_ERR, IB_WC_LOC_ACCESS_ERR,
          ERDMA_WC_VENDOR_SQE_INVALID_PD },
        { ERDMA_WC_SEND_WARRPING_ERR, IB_WC_LOC_ACCESS_ERR,
          ERDMA_WC_VENDOR_SQE_WARP_ERR },
        { ERDMA_WC_FLUSH_ERR, IB_WC_WR_FLUSH_ERR, ERDMA_WC_VENDOR_NO_ERR },
        { ERDMA_WC_RETRY_EXC_ERR, IB_WC_RETRY_EXC_ERR, ERDMA_WC_VENDOR_NO_ERR },
};

/* The CQE was consumed, but its QP is already gone; skip reporting it. */
#define ERDMA_POLLCQ_NO_QP 1

static int erdma_poll_one_cqe(struct erdma_cq *cq, struct ib_wc *wc)
{
        struct erdma_dev *dev = to_edev(cq->ibcq.device);
        u8 opcode, syndrome, qtype;
        struct erdma_kqp *kern_qp;
        struct erdma_cqe *cqe;
        struct erdma_qp *qp;
        u16 wqe_idx, depth;
        u32 qpn, cqe_hdr;
        u64 *id_table;
        u64 *wqe_hdr;

        cqe = get_next_valid_cqe(cq);
        if (!cqe)
                return -EAGAIN;

        cq->kern_cq.ci++;

        /* Make sure the CQE body is read only after the owner-bit check. */
        dma_rmb();

        qpn = be32_to_cpu(cqe->qpn);
        wqe_idx = be32_to_cpu(cqe->qe_idx);
        cqe_hdr = be32_to_cpu(cqe->hdr);

        qp = find_qp_by_qpn(dev, qpn);
        if (!qp)
                return ERDMA_POLLCQ_NO_QP;

        kern_qp = &qp->kern_qp;

        qtype = FIELD_GET(ERDMA_CQE_HDR_QTYPE_MASK, cqe_hdr);
        syndrome = FIELD_GET(ERDMA_CQE_HDR_SYNDROME_MASK, cqe_hdr);
        opcode = FIELD_GET(ERDMA_CQE_HDR_OPCODE_MASK, cqe_hdr);

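        /*
         * For SQ completions, also retire the WQE: the SQE header records
         * how many WQEBBs the request occupied, which lets us advance the
         * SQ consumer index past multi-block WQEs in one step.
         */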
        if (qtype == ERDMA_CQE_QTYPE_SQ) {
                id_table = kern_qp->swr_tbl;
                depth = qp->attrs.sq_size;
                wqe_hdr = get_queue_entry(qp->kern_qp.sq_buf, wqe_idx,
                                          qp->attrs.sq_size, SQEBB_SHIFT);
                kern_qp->sq_ci =
                        FIELD_GET(ERDMA_SQE_HDR_WQEBB_CNT_MASK, *wqe_hdr) +
                        wqe_idx + 1;
        } else {
                id_table = kern_qp->rwr_tbl;
                depth = qp->attrs.rq_size;
        }
        wc->wr_id = id_table[wqe_idx & (depth - 1)];
        wc->byte_len = be32_to_cpu(cqe->size);

        wc->wc_flags = 0;

        wc->opcode = wc_mapping_table[opcode];
        if (opcode == ERDMA_OP_RECV_IMM || opcode == ERDMA_OP_RSP_SEND_IMM) {
                wc->ex.imm_data = cpu_to_be32(le32_to_cpu(cqe->imm_data));
                wc->wc_flags |= IB_WC_WITH_IMM;
        } else if (opcode == ERDMA_OP_RECV_INV) {
                wc->ex.invalidate_rkey = be32_to_cpu(cqe->inv_rkey);
                wc->wc_flags |= IB_WC_WITH_INVALIDATE;
        }

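        /* Clamp unknown hardware syndromes to a generic error. */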
        if (syndrome >= ERDMA_NUM_WC_STATUS)
                syndrome = ERDMA_WC_GENERAL_ERR;

        wc->status = map_cqe_status[syndrome].base;
        wc->vendor_err = map_cqe_status[syndrome].vendor;
        wc->qp = &qp->ibqp;

        return 0;
}

int erdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct erdma_cq *cq = to_ecq(ibcq);
        unsigned long flags;
        int npolled, ret;

        spin_lock_irqsave(&cq->kern_cq.lock, flags);

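        /*
         * Poll at most num_entries completions; CQEs whose QP has already
         * been destroyed are consumed but not reported to the caller.
         */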
        for (npolled = 0; npolled < num_entries;) {
                ret = erdma_poll_one_cqe(cq, wc + npolled);

                if (ret == -EAGAIN) /* no new CQEs to poll. */
                        break;
                else if (ret) /* skip CQEs whose QP no longer exists. */
                        continue;

                npolled++;
        }

        spin_unlock_irqrestore(&cq->kern_cq.lock, flags);

        return npolled;
}