// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage RMBE
 * copy new RMBE data into user space
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/sched/signal.h>
#include <linux/splice.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_cdc.h"
#include "smc_tx.h" /* smc_tx_consumer_update() */
#include "smc_rx.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

/* callback implementation to wakeup consumers blocked with smc_rx_wait().
 * indirectly called by smc_cdc_msg_recv_action().
 */
static void smc_rx_wake_up(struct sock *sk)
{
	struct socket_wq *wq;

	/* derived from sock_def_readable() */
	/* called already in smc_listen_work() */
	rcu_read_lock();
	wq = rcu_dereference(sk->sk_wq);
	if (skwq_has_sleeper(wq))
		wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
						EPOLLRDNORM | EPOLLRDBAND);
	sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    (sk->sk_state == SMC_CLOSED))
		sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
	rcu_read_unlock();
}

/* Update consumer cursor
 *   @conn   connection to update
 *   @cons   consumer cursor
 *   @len    number of Bytes consumed
 *   Returns:
 *   1 if we should end our receive, 0 otherwise
 */
static int smc_rx_update_consumer(struct smc_sock *smc,
				  union smc_host_cursor cons, size_t len)
{
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool force = false;
	int diff, rc = 0;

	smc_curs_add(conn->rmb_desc->len, &cons, len);

	/* did we process urgent data? */
	if (conn->urg_state == SMC_URG_VALID || conn->urg_rx_skip_pend) {
		diff = smc_curs_comp(conn->rmb_desc->len, &cons,
				     &conn->urg_curs);
		if (sock_flag(sk, SOCK_URGINLINE)) {
			if (diff == 0) {
				force = true;
				rc = 1;
				conn->urg_state = SMC_URG_READ;
			}
		} else {
			if (diff == 1) {
				/* skip urgent byte */
				force = true;
				smc_curs_add(conn->rmb_desc->len, &cons, 1);
				conn->urg_rx_skip_pend = false;
			} else if (diff < -1)
				/* we read past urgent byte */
				conn->urg_state = SMC_URG_READ;
		}
	}

	smc_curs_copy(&conn->local_tx_ctrl.cons, &cons, conn);

	/* send consumer cursor update if required */
	/* similar to advertising new TCP rcv_wnd if required */
	smc_tx_consumer_update(conn, force);

	return rc;
}

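/* read the current consumer cursor and advance it by @len bytes; the
 * peer is informed via smc_rx_update_consumer() if required
 */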
static void smc_rx_update_cons(struct smc_sock *smc, size_t len)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_rx_update_consumer(smc, cons, len);
}

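/* context attached to each spliced pipe buffer, so that the release
 * callback knows which socket and how many bytes to credit back
 */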
struct smc_spd_priv {
	struct smc_sock *smc;
	size_t		 len;
};

static void smc_rx_pipe_buf_release(struct pipe_inode_info *pipe,
				    struct pipe_buffer *buf)
{
	struct smc_spd_priv *priv = (struct smc_spd_priv *)buf->private;
	struct smc_sock *smc = priv->smc;
	struct smc_connection *conn;
	struct sock *sk = &smc->sk;

	if (sk->sk_state == SMC_CLOSED ||
	    sk->sk_state == SMC_PEERFINCLOSEWAIT ||
	    sk->sk_state == SMC_APPFINCLOSEWAIT)
		goto out;
	conn = &smc->conn;
	lock_sock(sk);
	smc_rx_update_cons(smc, priv->len);
	release_sock(sk);
	if (atomic_sub_and_test(priv->len, &conn->splice_pending))
		smc_rx_wake_up(sk);
out:
	kfree(priv);
	put_page(buf->page);
	sock_put(sk);
}

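/* buffer operations for the pages handed to the pipe; consumer cursor
 * and splice_pending accounting happen in smc_rx_pipe_buf_release()
 */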
static const struct pipe_buf_operations smc_pipe_ops = {
	.release = smc_rx_pipe_buf_release,
	.get = generic_pipe_buf_get
};

static void smc_rx_spd_release(struct splice_pipe_desc *spd,
			       unsigned int i)
{
	put_page(spd->pages[i]);
}

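/* splice @len bytes starting at @src in the receive buffer into @pipe.
 * No data is copied: the RMB pages themselves are attached to the pipe;
 * the consumer cursor is advanced only when the pipe buffers are
 * released again (see smc_rx_pipe_buf_release()).
 */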
static int smc_rx_splice(struct pipe_inode_info *pipe, char *src, size_t len,
			 struct smc_sock *smc)
{
	struct smc_link_group *lgr = smc->conn.lgr;
	int offset = offset_in_page(src);
	struct partial_page *partial;
	struct splice_pipe_desc spd;
	struct smc_spd_priv **priv;
	struct page **pages;
	int bytes, nr_pages;
	int i;

	nr_pages = !lgr->is_smcd && smc->conn.rmb_desc->is_vm ?
		   PAGE_ALIGN(len + offset) / PAGE_SIZE : 1;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		goto out;
	partial = kcalloc(nr_pages, sizeof(*partial), GFP_KERNEL);
	if (!partial)
		goto out_page;
	priv = kcalloc(nr_pages, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		goto out_part;
	for (i = 0; i < nr_pages; i++) {
		priv[i] = kzalloc(sizeof(**priv), GFP_KERNEL);
		if (!priv[i])
			goto out_priv;
	}

	if (lgr->is_smcd ||
	    (!lgr->is_smcd && !smc->conn.rmb_desc->is_vm)) {
		/* smcd or smcr that uses physically contiguous RMBs */
		priv[0]->len = len;
		priv[0]->smc = smc;
		partial[0].offset = src - (char *)smc->conn.rmb_desc->cpu_addr;
		partial[0].len = len;
		partial[0].private = (unsigned long)priv[0];
		pages[0] = smc->conn.rmb_desc->pages;
	} else {
		int size, left = len;
		void *buf = src;
		/* smcr that uses virtually contiguous RMBs */
		for (i = 0; i < nr_pages; i++) {
			size = min_t(int, PAGE_SIZE - offset, left);
			priv[i]->len = size;
			priv[i]->smc = smc;
			pages[i] = vmalloc_to_page(buf);
			partial[i].offset = offset;
			partial[i].len = size;
			partial[i].private = (unsigned long)priv[i];
			buf += size / sizeof(*buf);
			left -= size;
			offset = 0;
		}
	}
	spd.nr_pages_max = nr_pages;
	spd.nr_pages = nr_pages;
	spd.pages = pages;
	spd.partial = partial;
	spd.ops = &smc_pipe_ops;
	spd.spd_release = smc_rx_spd_release;

	bytes = splice_to_pipe(pipe, &spd);
	if (bytes > 0) {
		sock_hold(&smc->sk);
		if (!lgr->is_smcd && smc->conn.rmb_desc->is_vm) {
			for (i = 0; i < PAGE_ALIGN(bytes + offset) / PAGE_SIZE; i++)
				get_page(pages[i]);
		} else {
			get_page(smc->conn.rmb_desc->pages);
		}
		atomic_add(bytes, &smc->conn.splice_pending);
	}
	kfree(priv);
	kfree(partial);
	kfree(pages);

	return bytes;

out_priv:
	for (i = (i - 1); i >= 0; i--)
		kfree(priv[i]);
	kfree(priv);
out_part:
	kfree(partial);
out_page:
	kfree(pages);
out:
	return -ENOMEM;
}

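/* wait criterion for recvmsg(): bytes handed to a pipe by splice() stay
 * accounted in conn->splice_pending until released, and a regular read
 * must not overtake them
 */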
static int smc_rx_data_available_and_no_splice_pend(struct smc_connection *conn)
{
	return atomic_read(&conn->bytes_to_rcv) &&
	       !atomic_read(&conn->splice_pending);
}

/* blocks rcvbuf consumer until >=len bytes available or timeout or interrupted
 *   @smc    smc socket
 *   @timeo  pointer to max seconds to wait, pointer to value 0 for no timeout
 *   @fcrit  add'l criterion to evaluate as function pointer
 * Returns:
 * 1 if at least 1 byte available in rcvbuf or if socket error/shutdown.
 * 0 otherwise (nothing in rcvbuf nor timeout, e.g. interrupted).
 */
int smc_rx_wait(struct smc_sock *smc, long *timeo,
		int (*fcrit)(struct smc_connection *conn))
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct smc_cdc_conn_state_flags *cflags =
					&conn->local_tx_ctrl.conn_state_flags;
	struct sock *sk = &smc->sk;
	int rc;

	if (fcrit(conn))
		return 1;
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	add_wait_queue(sk_sleep(sk), &wait);
	rc = sk_wait_event(sk, timeo,
			   sk->sk_err ||
			   cflags->peer_conn_abort ||
			   sk->sk_shutdown & RCV_SHUTDOWN ||
			   conn->killed ||
			   fcrit(conn),
			   &wait);
	remove_wait_queue(sk_sleep(sk), &wait);
	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	return rc;
}

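/* receive urgent data (MSG_OOB): hand out the single urgent byte, unless
 * urgent data is inlined into the stream (SOCK_URGINLINE) or was already
 * consumed
 */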
static int smc_rx_recv_urg(struct smc_sock *smc, struct msghdr *msg, int len,
			   int flags)
{
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor cons;
	struct sock *sk = &smc->sk;
	int rc = 0;

	if (sock_flag(sk, SOCK_URGINLINE) ||
	    !(conn->urg_state == SMC_URG_VALID) ||
	    conn->urg_state == SMC_URG_READ)
		return -EINVAL;

	SMC_STAT_INC(smc, urg_data_cnt);
	if (conn->urg_state == SMC_URG_VALID) {
		if (!(flags & MSG_PEEK))
			smc->conn.urg_state = SMC_URG_READ;
		msg->msg_flags |= MSG_OOB;
		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				rc = memcpy_to_msg(msg, &conn->urg_rx_byte, 1);
			len = 1;
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			if (smc_curs_diff(conn->rmb_desc->len, &cons,
					  &conn->urg_curs) > 1)
				conn->urg_rx_skip_pend = true;
			/* Urgent Byte was already accounted for, but trigger
			 * skipping the urgent byte in non-inline case
			 */
			if (!(flags & MSG_PEEK))
				smc_rx_update_consumer(smc, cons, 0);
		} else {
			msg->msg_flags |= MSG_TRUNC;
		}

		return rc ? -EFAULT : len;
	}

	if (sk->sk_state == SMC_CLOSED || sk->sk_shutdown & RCV_SHUTDOWN)
		return 0;

	return -EAGAIN;
}

static bool smc_rx_recvmsg_data_available(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;

	if (smc_rx_data_available(conn))
		return true;
	else if (conn->urg_state == SMC_URG_VALID)
		/* we received a single urgent Byte - skip */
		smc_rx_update_cons(smc, 0);
	return false;
}

/* smc_rx_recvmsg - receive data from RMBE
 * @msg:	copy data to receive buffer
 * @pipe:	copy data to pipe if set - indicates splice() call
 *
 * rcvbuf consumer: main API called by socket layer.
 * Called under sk lock.
 */
int smc_rx_recvmsg(struct smc_sock *smc, struct msghdr *msg,
		   struct pipe_inode_info *pipe, size_t len, int flags)
{
	size_t copylen, read_done = 0, read_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	int (*func)(struct smc_connection *conn);
	union smc_host_cursor cons;
	int readable, chunk;
	char *rcvbuf_base;
	struct sock *sk;
	int splbytes;
	long timeo;
	int target;		/* Read at least these many bytes */
	int rc;

	if (unlikely(flags & MSG_ERRQUEUE))
		return -EINVAL; /* future work for sk.sk_family == AF_SMC */

	sk = &smc->sk;
	if (sk->sk_state == SMC_LISTEN)
		return -ENOTCONN;
	if (flags & MSG_OOB)
		return smc_rx_recv_urg(smc, msg, len, flags);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	readable = atomic_read(&conn->bytes_to_rcv);
	if (readable >= conn->rmb_desc->len)
		SMC_STAT_RMB_RX_FULL(smc, !conn->lnk);

	if (len < readable)
		SMC_STAT_RMB_RX_SIZE_SMALL(smc, !conn->lnk);
	/* we currently use 1 RMBE per RMB, so RMBE == RMB base addr */
	rcvbuf_base = conn->rx_off + conn->rmb_desc->cpu_addr;

	do { /* while (read_remaining) */
		if (read_done >= target || (pipe && read_done))
			break;

		if (conn->killed)
			break;

		if (smc_rx_recvmsg_data_available(smc))
			goto copy;

		if (sk->sk_shutdown & RCV_SHUTDOWN) {
			/* smc_cdc_msg_recv_action() could have run after
			 * above smc_rx_recvmsg_data_available()
			 */
			if (smc_rx_recvmsg_data_available(smc))
				goto copy;
			break;
		}

		if (read_done) {
			if (sk->sk_err ||
			    sk->sk_state == SMC_CLOSED ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				read_done = sock_error(sk);
				break;
			}
			if (sk->sk_state == SMC_CLOSED) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					read_done = -ENOTCONN;
					break;
				}
				break;
			}
			if (!timeo)
				return -EAGAIN;
			if (signal_pending(current)) {
				read_done = sock_intr_errno(timeo);
				break;
			}
		}

		if (!smc_rx_data_available(conn)) {
			smc_rx_wait(smc, &timeo, smc_rx_data_available);
			continue;
		}

copy:
		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after waiting on data above */
		readable = atomic_read(&conn->bytes_to_rcv);
		splbytes = atomic_read(&conn->splice_pending);
		if (!readable || (msg && splbytes)) {
			if (splbytes)
				func = smc_rx_data_available_and_no_splice_pend;
			else
				func = smc_rx_data_available;
			smc_rx_wait(smc, &timeo, func);
			continue;
		}

		smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
		/* subsequent splice() calls pick up where previous left */
		if (splbytes)
			smc_curs_add(conn->rmb_desc->len, &cons, splbytes);
		if (conn->urg_state == SMC_URG_VALID &&
		    sock_flag(&smc->sk, SOCK_URGINLINE) &&
		    readable > 1)
			readable--;	/* always stop at urgent Byte */
		/* not more than what user space asked for */
		copylen = min_t(size_t, read_remaining, readable);
		/* determine chunks where to read from rcvbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->rmb_desc->len -
				  cons.count);
		chunk_len_sum = chunk_len;
		chunk_off = cons.count;
		smc_rmb_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			if (!(flags & MSG_TRUNC)) {
				if (msg) {
					rc = memcpy_to_msg(msg, rcvbuf_base +
							   chunk_off,
							   chunk_len);
				} else {
					rc = smc_rx_splice(pipe, rcvbuf_base +
							   chunk_off, chunk_len,
							   smc);
				}
				if (rc < 0) {
					if (!read_done)
						read_done = -EFAULT;
					goto out;
				}
			}
			read_remaining -= chunk_len;
			read_done += chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in recv ring buffer */
		}

		/* update cursors */
		if (!(flags & MSG_PEEK)) {
			/* increased in recv tasklet smc_cdc_msg_rcv() */
			smp_mb__before_atomic();
			atomic_sub(copylen, &conn->bytes_to_rcv);
			/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
			smp_mb__after_atomic();
			if (msg && smc_rx_update_consumer(smc, cons, copylen))
				goto out;
		}

		trace_smc_rx_recvmsg(smc, copylen);
	} while (read_remaining);
out:
	return read_done;
}

/* Initialize receive properties on connection establishment. NB: not __init! */
void smc_rx_init(struct smc_sock *smc)
{
	smc->sk.sk_data_ready = smc_rx_wake_up;
	atomic_set(&smc->conn.splice_pending, 0);
	smc->conn.urg_state = SMC_URG_READ;
}