// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

#define SMC_TX_WORK_DELAY	0

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		if (test_bit(SOCK_NOSPACE, &sock->flags))
			SMC_STAT_RMB_TX_FULL(smc, !smc->conn.lnk);
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wakeup sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space available
 * or urgent Byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->killed ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			/* ensure EPOLLOUT is subsequently generated */
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

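/* Note (illustrative): the wait above honours the socket send timeout.
 * With MSG_DONTWAIT, sock_sndtimeo() returns 0 and smc_tx_wait() fails
 * fast with -EAGAIN; with e.g. a 5s SO_SNDTIMEO it blocks until space
 * frees up, the remaining jiffies run out (-EAGAIN), a signal arrives
 * (sock_intr_errno()), or the connection goes away (-EPIPE/-ECONNRESET).
 */
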
static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}

/* If we have pending CDC messages, do not send:
 * Because CQE of this CDC message will happen shortly, it gives
 * a chance to coalesce future sendmsg() payload into one RDMA Write,
 * without need for a timer, and with no latency trade-off.
 * Algorithm here:
 *  1. First message should never cork
 *  2. If we have pending Tx CDC messages, wait for the first CDC
 *     message's completion
 *  3. Don't cork too much data in a single RDMA Write to prevent burst
 *     traffic; the total corked data should not exceed sendbuf/2
 */
static bool smc_should_autocork(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	int corking_size;

	corking_size = min_t(unsigned int, conn->sndbuf_desc->len >> 1,
			     sock_net(&smc->sk)->smc.sysctl_autocorking_size);

	if (atomic_read(&conn->cdc_pend_tx_wr) == 0 ||
	    smc_tx_prepared_sends(conn) > corking_size)
		return false;
	return true;
}

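/* Worked example (illustrative values): with a 128 KiB sndbuf and,
 * assuming the default net.smc.autocorking_size of 64 KiB,
 * corking_size = min(128K >> 1, 64K) = 64K. A push is deferred only
 * while an earlier CDC write is still in flight (cdc_pend_tx_wr > 0)
 * and no more than 64K is already queued; otherwise data is sent now.
 */
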
static bool smc_tx_should_cork(struct smc_sock *smc, struct msghdr *msg)
{
	struct smc_connection *conn = &smc->conn;

	if (smc_should_autocork(smc))
		return true;

	/* for a corked socket defer the RDMA writes if
	 * sndbuf_space is still available. The application
	 * should know how/when to uncork it.
	 */
	if ((msg->msg_flags & MSG_MORE ||
	     smc_tx_is_corked(smc) ||
	     msg->msg_flags & MSG_SENDPAGE_NOTLAST) &&
	    atomic_read(&conn->sndbuf_space))
		return true;

	return false;
}

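/* Usage sketch (illustrative, application side): explicit corking maps
 * to the TCP_CORK option, which SMC forwards to its internal clcsock,
 * e.g.:
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	send(fd, hdr, hdrlen, 0);	// deferred while space remains
 *	send(fd, body, bodylen, 0);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *
 * Passing MSG_MORE to send() has the same deferring effect per call.
 */
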
/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	if (sk->sk_state == SMC_INIT)
		return -ENOTCONN;

	if (len > conn->sndbuf_desc->len)
		SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk);

	if (len > conn->peer_rmbe_size)
		SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, !conn->lnk);

	if (msg->msg_flags & MSG_OOB)
		SMC_STAT_INC(smc, urg_data_cnt);

	while (msg_data_left(msg)) {
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->killed)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			if (send_done)
				return send_done;
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc)
				goto out_err;
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
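		/* Worked example (illustrative values): sndbuf len 16K,
		 * producer cursor at count 14K, copylen 4K. The 1st chunk
		 * copies 2K at offset 14K (up to the buffer end), the 2nd
		 * copies the remaining 2K at offset 0; chunk_len_sum then
		 * equals copylen and the loop ends after two iterations.
		 */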
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		/* If we need to cork, do nothing and wait for the next
		 * sendmsg() call or push on tx completion
		 */
		if (!smc_tx_should_cork(smc, msg))
			smc_tx_sndbuf_nonempty(conn);

		trace_smc_tx_sendmsg(smc, copylen);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);

	return rc;
}

int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset,
		    size_t size, int flags)
{
	struct msghdr msg = {.msg_flags = flags};
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iov, 1, size);
	rc = smc_tx_sendmsg(smc, &msg, size);
	kunmap(page);
	return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	int rc;

	rc = smc_ism_write(conn->lgr->smcd, conn->peer_token,
			   conn->peer_rmbe_idx, signal, conn->tx_off + offset,
			   data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_rdma_wr *rdma_wr)
{
	struct smc_link_group *lgr = conn->lgr;
	struct smc_link *link = conn->lnk;
	int rc;

	rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr->wr.num_sge = num_sges;
	rdma_wr->remote_addr =
		lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
	if (rc)
		smcr_link_down_cond_sched(link);
	return rc;
}

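/* Example (illustrative values): if the peer registered its RMB at
 * dma_addr 0x10000, this connection's RMBE starts 8K into that RMB
 * (conn->tx_off = 0x2000) and the current chunk targets offset 0x400
 * within the RMBE, then remote_addr = 0x10000 + 0x2000 + 0x400.
 */
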
/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}

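/* Example (illustrative values): with peer_rmbe_size = 64K, prod at
 * {wrap = 3, count = 60K} and len = 8K, smc_curs_add() advances prod to
 * {wrap = 4, count = 4K}; peer_rmbe_space shrinks by the 8K now in
 * flight until the peer's CDC message confirms consumption.
 */
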
/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len,
			       struct smc_rdma_wr *wr_rdma_buf)
{
	struct smc_link *link = conn->lnk;

	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[link->link_idx].sgl);
	u64 virt_addr = (uintptr_t)conn->sndbuf_desc->cpu_addr;
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		struct ib_rdma_wr *wr = &wr_rdma_buf->wr_tx_rdma[dstchunk];
		struct ib_sge *sge = wr->wr.sg_list;
		u64 base_addr = dma_addr;

		if (dst_len < link->qp_attr.cap.max_inline_data) {
			base_addr = virt_addr;
			wr->wr.send_flags |= IB_SEND_INLINE;
		} else {
			wr->wr.send_flags &= ~IB_SEND_INLINE;
		}
		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sge[srcchunk].addr = conn->sndbuf_desc->is_vm ?
				(virt_addr + src_off) : (base_addr + src_off);
			sge[srcchunk].length = src_len;
			if (conn->sndbuf_desc->is_vm)
				sge[srcchunk].lkey =
					conn->sndbuf_desc->mr[link->link_idx]->lkey;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, wr);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}

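/* Shape of the transfer (illustrative): both rings may wrap, so a single
 * logical send decomposes into at most 2 destination chunks (RMBE wrap)
 * times at most 2 source chunks (sndbuf wrap), i.e. up to two RDMA
 * Writes with up to two SGEs each. E.g. 8K of data that wraps both a
 * 16K sndbuf (4K left before the end) and a 16K RMBE (6K left before
 * the end) posts one 6K Write (4K + 2K SGEs) and one 2K Write.
 */
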
/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}

/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn,
			      struct smc_rdma_wr *wr_rdma_buf)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0) {
		struct smc_sock *smc = container_of(conn, struct smc_sock,
						    conn);
		SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
		return 0;
	}
	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len, wr_rdma_buf);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
							/* dst: peer RMBE */
	smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */

	return 0;
}

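/* Wrap-case intuition (illustrative): cursors carry a wrap count, so
 * prod.wrap == cons.wrap means producer and consumer sit in the same
 * "lap" of the RMBE: the filled area [cons.count, prod.count) is
 * contiguous and the free space wraps around the buffer end, needing up
 * to two destination chunks. If the wrap counts differ, the filled area
 * itself wraps and the free space [prod.count, cons.count) is a single
 * contiguous chunk.
 */
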
/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	struct smc_link *link = conn->lnk;
	struct smc_rdma_wr *wr_rdma_buf;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!link || !smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
	if (rc < 0) {
		smc_wr_tx_link_put(link);
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED)
				return sock_error(&smc->sk);
			if (conn->killed)
				return -EPIPE;
			rc = 0;
			mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					 SMC_TX_WORK_DELAY);
		}
		return rc;
	}

	spin_lock_bh(&conn->send_lock);
	if (link != conn->lnk) {
		/* link of connection changed, tx_work will restart */
		smc_wr_tx_put_slot(link,
				   (struct smc_wr_tx_pend_priv *)pend);
		rc = -ENOLINK;
		goto out_unlock;
	}
	if (!pflags->urg_data_present) {
		rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
		if (rc) {
			smc_wr_tx_put_slot(link,
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	smc_wr_tx_link_put(link);
	return rc;
}

static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn, NULL);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

static int __smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc = 0;

	/* No data in the send queue */
	if (unlikely(smc_tx_prepared_sends(conn) <= 0))
		goto out;

	/* Peer doesn't have RMBE space */
	if (unlikely(atomic_read(&conn->peer_rmbe_space) <= 0)) {
		SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
		goto out;
	}

	if (conn->killed ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		rc = -EPIPE;	/* connection being aborted */
		goto out;
	}
	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	if (!rc) {
		/* trigger socket release if connection is closing */
		smc_close_wake_tx_prepared(smc);
	}

out:
	return rc;
}

int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	/* This makes sure only one can send simultaneously to prevent wasting
	 * of CPU and CDC slot.
	 * Record whether someone has tried to push while we are pushing.
	 */
	if (atomic_inc_return(&conn->tx_pushing) > 1)
		return 0;

again:
	atomic_set(&conn->tx_pushing, 1);
	smp_wmb(); /* Make sure tx_pushing is 1 before real send */
	rc = __smc_tx_sndbuf_nonempty(conn);

	/* We need to check whether someone else has added some data into
	 * the send queue and tried to push but failed after the atomic_set()
	 * while we were pushing.
	 * If so, we need to push again to prevent that data from hanging in
	 * the send queue.
	 */
	if (unlikely(!atomic_dec_and_test(&conn->tx_pushing)))
		goto again;

	return rc;
}

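/* Race timeline (illustrative): CPU0 is pushing (tx_pushing == 1) when
 * CPU1 queues data and calls smc_tx_sndbuf_nonempty(); CPU1's
 * atomic_inc_return() sees 2 and returns 0 without sending. CPU0's
 * atomic_dec_and_test() then sees a nonzero remainder and loops back to
 * "again", so CPU1's data is pushed by CPU0 rather than left behind.
 */
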
/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit. The caller
 * must hold the sock lock.
 */
void smc_tx_pending(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	if (smc->sk.sk_err)
		return;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit in the locked
 * sock.
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	lock_sock(&smc->sk);
	smc_tx_pending(conn);
	release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff_large(conn->rmb_desc->len,
						  &cfed, &prod);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if (conn->killed ||
		    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
			return;
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    !conn->killed) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					   SMC_TX_WORK_DELAY);
			return;
		}
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

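/* Update policy (illustrative values): with an 8K RMB and an
 * rmbe_update_limit of e.g. 1K, consuming 512 bytes triggers no CDC
 * message; once more than 1K is unconfirmed *and* the sender is down to
 * half the RMB (or is write_blocked), a consumer-cursor update is sent
 * so the peer's usable window reopens.
 */
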
/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}