// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 * handles flow control
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <[email protected]>
 */

#include <linux/spinlock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

/********************************** send *************************************/

struct smc_cdc_tx_pend {
	struct smc_connection	*conn;		/* socket connection */
	union smc_host_cursor	cursor;		/* tx sndbuf cursor sent */
	union smc_host_cursor	p_cursor;	/* rx RMBE cursor produced */
	u16			ctrl_seq;	/* conn. tx sequence # */
};

/* handler for send/transmission completion of a CDC msg */
static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
	struct smc_connection *conn = cdcpend->conn;
	struct smc_sock *smc;
	int diff;

	if (!conn)
		/* already dismissed */
		return;

	smc = container_of(conn, struct smc_sock, conn);
	bh_lock_sock(&smc->sk);
	if (!wc_status) {
		diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
				     &cdcpend->conn->tx_curs_fin,
				     &cdcpend->cursor);
		/* sndbuf_space is decreased in smc_sendmsg */
		smp_mb__before_atomic();
		atomic_add(diff, &cdcpend->conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn);
	}
	smc_tx_sndbuf_nonfull(smc);
	bh_unlock_sock(&smc->sk);
}

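/* Reserve a send work request buffer and pending slot on the connection's
 * link; fails with -EPIPE if the connection is already being terminated
 * (local alert token cleared).
 */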
int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_wr_buf **wr_buf,
			  struct smc_cdc_tx_pend **pend)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
				     (struct smc_wr_tx_pend_priv **)pend);
	if (!conn->alert_token_local)
		/* abnormal termination */
		rc = -EPIPE;
	return rc;
}

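/* Snapshot the current tx cursors and CDC sequence number into the pending
 * send descriptor so the completion handler can later advance tx_curs_fin
 * and free send buffer space accordingly.
 */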
static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
					    struct smc_cdc_tx_pend *pend)
{
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
	BUILD_BUG_ON_MSG(
		offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)");
	pend->conn = conn;
	pend->cursor = conn->tx_curs_sent;
	pend->p_cursor = conn->local_tx_ctrl.prod;
	pend->ctrl_seq = conn->tx_cdc_seq;
}

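/* Build a CDC message from the local tx control data and post it on the
 * connection's link; on success, remember the consumer cursor just sent as
 * the confirmed receive cursor.
 */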
int smc_cdc_msg_send(struct smc_connection *conn,
		     struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend)
{
	struct smc_link *link;
	int rc;

	link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_cdc_add_pending_send(conn, pend);

	conn->tx_cdc_seq++;
	conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
	smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
			    &conn->local_tx_ctrl, conn);
	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
	if (!rc)
		smc_curs_copy(&conn->rx_curs_confirmed,
			      &conn->local_tx_ctrl.cons, conn);

	return rc;
}

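/* SMC-R variant: reserve a send slot and transmit the CDC message over the
 * RoCE link.
 */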
static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
	if (rc)
		return rc;

	return smc_cdc_msg_send(conn, wr_buf, pend);
}

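/* Send a CDC message for the connection, dispatching to the SMC-D (ISM) or
 * SMC-R (RoCE) transport; the SMC-D path is serialized under send_lock.
 */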
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	int rc;

	if (conn->lgr->is_smcd) {
		spin_lock_bh(&conn->send_lock);
		rc = smcd_cdc_msg_send(conn);
		spin_unlock_bh(&conn->send_lock);
	} else {
		rc = smcr_cdc_get_slot_and_msg_send(conn);
	}

	return rc;
}

static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
			      unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	return cdc_pend->conn == conn;
}

static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
{
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	cdc_pend->conn = NULL;
}

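/* Dismiss all pending CDC sends of a connection: clear their conn pointer so
 * the tx completion handler treats them as already dismissed.
 */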
void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
				smc_cdc_tx_filter, smc_cdc_tx_dismisser,
				(unsigned long)conn);
}

/* Send a SMC-D CDC header.
 * This increments the free space available in our send buffer.
 * Also update the confirmed receive buffer with what was sent to the peer.
 */
int smcd_cdc_msg_send(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	union smc_host_cursor curs;
	struct smcd_cdc_msg cdc;
	int rc, diff;

	memset(&cdc, 0, sizeof(cdc));
	cdc.common.type = SMC_CDC_MSG_TYPE;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
	cdc.prod.wrap = curs.wrap;
	cdc.prod.count = curs.count;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
	cdc.cons.wrap = curs.wrap;
	cdc.cons.count = curs.count;
	cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
	cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
	rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
	if (rc)
		return rc;
	smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
	/* Calculate transmitted data and increment free send buffer space */
	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
			     &conn->tx_curs_sent);
	/* increased by confirmed number of bytes */
	smp_mb__before_atomic();
	atomic_add(diff, &conn->sndbuf_space);
	/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
	smp_mb__after_atomic();
	smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);

	smc_tx_sndbuf_nonfull(smc);
	return rc;
}

/********************************* receive ***********************************/

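/* Return true if seq1 is older than seq2, taking 16-bit sequence number
 * wrap-around into account.
 */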
static inline bool smc_cdc_before(u16 seq1, u16 seq2)
{
	return (s16)(seq1 - seq2) < 0;
}

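/* Newly arrived data carries urgent data: remember the urgent byte and its
 * cursor position, and signal SIGURG to the socket owner.
 */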
static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
					    int *diff_prod)
{
	struct smc_connection *conn = &smc->conn;
	char *base;

	/* new data included urgent business */
	smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
	conn->urg_state = SMC_URG_VALID;
	if (!sock_flag(&smc->sk, SOCK_URGINLINE))
		/* we'll skip the urgent byte, so don't account for it */
		(*diff_prod)--;
	base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
	if (conn->urg_curs.count)
		conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
	else
		conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1);
	sk_send_sigurg(&smc->sk);
}

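/* Apply a received CDC message to the connection: update producer/consumer
 * cursors, account freed peer RMBE space and newly received bytes, handle
 * urgent data, and propagate peer close/abort indications.
 */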
static void smc_cdc_msg_recv_action(struct smc_sock *smc,
				    struct smc_cdc_msg *cdc)
{
	union smc_host_cursor cons_old, prod_old;
	struct smc_connection *conn = &smc->conn;
	int diff_cons, diff_prod;

	smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
	smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
	smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);

	diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
				  &conn->local_rx_ctrl.cons);
	if (diff_cons) {
		/* peer_rmbe_space is decreased during data transfer with RDMA
		 * write
		 */
		smp_mb__before_atomic();
		atomic_add(diff_cons, &conn->peer_rmbe_space);
		/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
		smp_mb__after_atomic();
	}

	diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
				  &conn->local_rx_ctrl.prod);
	if (diff_prod) {
		if (conn->local_rx_ctrl.prod_flags.urg_data_present)
			smc_cdc_handle_urg_data_arrival(smc, &diff_prod);
		/* bytes_to_rcv is decreased in smc_recvmsg */
		smp_mb__before_atomic();
		atomic_add(diff_prod, &conn->bytes_to_rcv);
		/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
		smp_mb__after_atomic();
		smc->sk.sk_data_ready(&smc->sk);
	} else {
		if (conn->local_rx_ctrl.prod_flags.write_blocked ||
		    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
		    conn->local_rx_ctrl.prod_flags.urg_data_pending) {
			if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
				conn->urg_state = SMC_URG_NOTYET;
			/* force immediate tx of current consumer cursor, but
			 * under send_lock to guarantee arrival in seqno-order
			 */
			if (smc->sk.sk_state != SMC_INIT)
				smc_tx_sndbuf_nonempty(conn);
		}
	}

	/* piggy backed tx info */
	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
	if (diff_cons && smc_tx_prepared_sends(conn)) {
		smc_tx_sndbuf_nonempty(conn);
		/* trigger socket release if connection closed */
		smc_close_wake_tx_prepared(smc);
	}
	if (diff_cons && conn->urg_tx_pend &&
	    atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
		/* urg data confirmed by peer, indicate we're ready for more */
		conn->urg_tx_pend = false;
		smc->sk.sk_write_space(&smc->sk);
	}

	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		smc->sk.sk_err = ECONNRESET;
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	}
	if (smc_cdc_rxed_any_close_or_senddone(conn)) {
		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
		if (smc->clcsock && smc->clcsock->sk)
			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
		sock_set_flag(&smc->sk, SOCK_DONE);
		sock_hold(&smc->sk); /* sock_put in close_work */
		if (!schedule_work(&conn->close_work))
			sock_put(&smc->sk);
	}
}

/* called under tasklet context */
static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
{
	sock_hold(&smc->sk);
	bh_lock_sock(&smc->sk);
	smc_cdc_msg_recv_action(smc, cdc);
	bh_unlock_sock(&smc->sk);
	sock_put(&smc->sk); /* no free sk in softirq-context */
}

/* Schedule a tasklet for this connection. Triggered from the ISM device IRQ
 * handler to indicate update in the DMBE.
 *
 * Context:
 * - tasklet context
 */
static void smcd_cdc_rx_tsklet(unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smcd_cdc_msg *data_cdc;
	struct smcd_cdc_msg cdc;
	struct smc_sock *smc;

	if (!conn)
		return;

	data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
	smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
	smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
	smc = container_of(conn, struct smc_sock, conn);
	smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}

/* Initialize receive tasklet. Called from ISM device IRQ handler to start
 * receiver side.
 */
void smcd_cdc_rx_init(struct smc_connection *conn)
{
	tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
}

/***************************** init, exit, misc ******************************/

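/* Completion handler for received CDC messages: validate length and sequence
 * number, look up the connection by its alert token, and process the message.
 */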
static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_cdc_msg *cdc = buf;
	struct smc_connection *conn;
	struct smc_link_group *lgr;
	struct smc_sock *smc;

	if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
		return; /* short message */
	if (cdc->len != SMC_WR_TX_SIZE)
		return; /* invalid message */

	/* lookup connection */
	lgr = smc_get_lgr(link);
	read_lock_bh(&lgr->conns_lock);
	conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
	read_unlock_bh(&lgr->conns_lock);
	if (!conn)
		return;
	smc = container_of(conn, struct smc_sock, conn);

	if (!cdc->prod_flags.failover_validation) {
		if (smc_cdc_before(ntohs(cdc->seqno),
				   conn->local_rx_ctrl.seqno))
			/* received seqno is old */
			return;
	}
	smc_cdc_msg_recv(smc, cdc);
}

static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = {
	{
		.handler	= smc_cdc_rx_handler,
		.type		= SMC_CDC_MSG_TYPE
	},
	{
		.handler	= NULL,
	}
};

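/* Register the CDC receive handler(s) with the general work request (WR)
 * receive layer at module init time.
 */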
int __init smc_cdc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_cdc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}