// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 *  AF_SMC protocol family socket handler keeping the AF_INET sock address type
 *  applies to SOCK_STREAM sockets only
 *  offers an alternative communication option for TCP-protocol sockets
 *  applicable with RoCE-cards only
 *
 *  Initial restrictions:
 *    - support for alternate links postponed
 *
 *  Copyright IBM Corp. 2016, 2018
 *
 *  based on prototype from Frank Blaschka
 */
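/* Usage (illustrative, not part of the original source): user space selects
 * SMC simply by opening the socket with the AF_SMC family; everything else
 * works like a TCP stream socket, e.g.
 *
 *	sd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC);	 // IPv4 peers
 *	sd = socket(AF_SMC, SOCK_STREAM, SMCPROTO_SMC6); // IPv6 peers
 *
 * If the CLC handshake implemented below fails, the connection falls back
 * transparently to plain TCP via the internal clcsock.
 */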
#define KMSG_COMPONENT "smc"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/socket.h>
#include <linux/workqueue.h>
#include <linux/in.h>
#include <linux/sched/signal.h>
#include <linux/if_vlan.h>
#include <linux/rcupdate_wait.h>

#include <net/sock.h>
#include <net/tcp.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include "smc_netns.h"

#include "smc.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"
static DEFINE_MUTEX(smc_server_lgr_pending);	/* serialize link group
						 * creation on server
						 */
static DEFINE_MUTEX(smc_client_lgr_pending);	/* serialize link group
						 * creation on client
						 */
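/* Link group creation is serialized per role so that connections started in
 * parallel find and reuse an existing link group instead of each creating
 * one of their own; SMC-D knows only a single link group role and therefore
 * always takes the server lock (see smc_connect_ism() and
 * smc_connect_abort()).
 */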
static void smc_tcp_listen_work(struct work_struct *);
static void smc_connect_work(struct work_struct *);
static void smc_set_keepalive(struct sock *sk, int val)
{
	struct smc_sock *smc = smc_sk(sk);

	smc->clcsock->sk->sk_prot->keepalive(smc->clcsock->sk, val);
}
static struct smc_hashinfo smc_v4_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock),
};

static struct smc_hashinfo smc_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock),
};
int smc_hash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;
	struct hlist_head *head;

	head = &h->ht;

	write_lock_bh(&h->lock);
	sk_add_node(sk, head);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
	write_unlock_bh(&h->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(smc_hash_sk);
void smc_unhash_sk(struct sock *sk)
{
	struct smc_hashinfo *h = sk->sk_prot->h.smc_hash;

	write_lock_bh(&h->lock);
	if (sk_del_node_init(sk))
		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	write_unlock_bh(&h->lock);
}
EXPORT_SYMBOL_GPL(smc_unhash_sk);
struct proto smc_proto = {
	.name		= "SMC",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v4_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto);

struct proto smc_proto6 = {
	.name		= "SMC6",
	.owner		= THIS_MODULE,
	.keepalive	= smc_set_keepalive,
	.hash		= smc_hash_sk,
	.unhash		= smc_unhash_sk,
	.obj_size	= sizeof(struct smc_sock),
	.h.smc_hash	= &smc_v6_hashinfo,
	.slab_flags	= SLAB_TYPESAFE_BY_RCU,
};
EXPORT_SYMBOL_GPL(smc_proto6);
static int smc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = 0;

	if (!sk)
		goto out;

	smc = smc_sk(sk);

	/* cleanup for a dangling non-blocking connect */
	if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
		tcp_abort(smc->clcsock->sk, ECONNABORTED);
	flush_work(&smc->connect_work);

	if (sk->sk_state == SMC_LISTEN)
		/* smc_close_non_accepted() is called and acquires
		 * sock lock for child sockets again
		 */
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
	else
		lock_sock(sk);

	if (!smc->use_fallback) {
		rc = smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	} else {
		if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
			sock_put(sk); /* passive closing */
		if (sk->sk_state == SMC_LISTEN) {
			/* wake up clcsock accept */
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
		}
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk);
	}

	sk->sk_prot->unhash(sk);

	if (sk->sk_state == SMC_CLOSED) {
		if (smc->clcsock) {
			release_sock(sk);
			smc_clcsock_release(smc);
			lock_sock(sk);
		}
		if (!smc->use_fallback)
			smc_conn_free(&smc->conn);
	}

	/* detach socket */
	sock_orphan(sk);
	sock->sk = NULL;
	release_sock(sk);

	sock_put(sk); /* final sock_put */
out:
	return rc;
}
static void smc_destruct(struct sock *sk)
{
	if (sk->sk_state != SMC_CLOSED)
		return;
	if (!sock_flag(sk, SOCK_DEAD))
		return;

	sk_refcnt_debug_dec(sk);
}
static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
				   int protocol)
{
	struct smc_sock *smc;
	struct proto *prot;
	struct sock *sk;

	prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto;
	sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk); /* sets sk_refcnt to 1 */
	sk->sk_state = SMC_INIT;
	sk->sk_destruct = smc_destruct;
	sk->sk_protocol = protocol;
	smc = smc_sk(sk);
	INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
	INIT_WORK(&smc->connect_work, smc_connect_work);
	INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
	INIT_LIST_HEAD(&smc->accept_q);
	spin_lock_init(&smc->accept_q_lock);
	spin_lock_init(&smc->conn.send_lock);
	sk->sk_prot->hash(sk);
	sk_refcnt_debug_inc(sk);
	mutex_init(&smc->clcsock_release_lock);

	return sk;
}
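/* The smc_sock allocated above only carries the SMC state; its companion
 * TCP socket (clcsock), created in smc_create(), transports the CLC
 * handshake and all traffic of fallback connections.
 */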
static int smc_bind(struct socket *sock, struct sockaddr *uaddr,
		    int addr_len)
{
	struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);

	/* replicate tests from inet_bind(), to be safe wrt. future changes */
	rc = -EINVAL;
	if (addr_len < sizeof(struct sockaddr_in))
		goto out;

	rc = -EAFNOSUPPORT;
	if (addr->sin_family != AF_INET &&
	    addr->sin_family != AF_INET6 &&
	    addr->sin_family != AF_UNSPEC)
		goto out;
	/* accept AF_UNSPEC (mapped to AF_INET) only if s_addr is INADDR_ANY */
	if (addr->sin_family == AF_UNSPEC &&
	    addr->sin_addr.s_addr != htonl(INADDR_ANY))
		goto out;

	lock_sock(sk);

	/* Check if socket is already active */
	rc = -EINVAL;
	if (sk->sk_state != SMC_INIT)
		goto out_rel;

	smc->clcsock->sk->sk_reuse = sk->sk_reuse;
	rc = kernel_bind(smc->clcsock, uaddr, addr_len);

out_rel:
	release_sock(sk);
out:
	return rc;
}
static void smc_copy_sock_settings(struct sock *nsk, struct sock *osk,
				   unsigned long mask)
{
	/* options we don't get control via setsockopt for */
	nsk->sk_type = osk->sk_type;
	nsk->sk_sndbuf = osk->sk_sndbuf;
	nsk->sk_rcvbuf = osk->sk_rcvbuf;
	nsk->sk_sndtimeo = osk->sk_sndtimeo;
	nsk->sk_rcvtimeo = osk->sk_rcvtimeo;
	nsk->sk_mark = osk->sk_mark;
	nsk->sk_priority = osk->sk_priority;
	nsk->sk_rcvlowat = osk->sk_rcvlowat;
	nsk->sk_bound_dev_if = osk->sk_bound_dev_if;
	nsk->sk_err = osk->sk_err;

	nsk->sk_flags &= ~mask;
	nsk->sk_flags |= osk->sk_flags & mask;
}
#define SK_FLAGS_SMC_TO_CLC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_BROADCAST) | \
			     (1UL << SOCK_TIMESTAMP) | \
			     (1UL << SOCK_DBG) | \
			     (1UL << SOCK_RCVTSTAMP) | \
			     (1UL << SOCK_RCVTSTAMPNS) | \
			     (1UL << SOCK_LOCALROUTE) | \
			     (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
			     (1UL << SOCK_RXQ_OVFL) | \
			     (1UL << SOCK_WIFI_STATUS) | \
			     (1UL << SOCK_NOFCS) | \
			     (1UL << SOCK_FILTER_LOCKED) | \
			     (1UL << SOCK_TSTAMP_NEW))
/* copy only relevant settings and flags of SOL_SOCKET level from smc to
 * clc socket (since smc is not called for these options from net/core)
 */
static void smc_copy_sock_settings_to_clc(struct smc_sock *smc)
{
	smc_copy_sock_settings(smc->clcsock->sk, &smc->sk, SK_FLAGS_SMC_TO_CLC);
}

#define SK_FLAGS_CLC_TO_SMC ((1UL << SOCK_URGINLINE) | \
			     (1UL << SOCK_KEEPOPEN) | \
			     (1UL << SOCK_LINGER) | \
			     (1UL << SOCK_DBG))
/* copy only settings and flags relevant for smc from clc to smc socket */
static void smc_copy_sock_settings_to_smc(struct smc_sock *smc)
{
	smc_copy_sock_settings(&smc->sk, smc->clcsock->sk, SK_FLAGS_CLC_TO_SMC);
}
/* register a new rmb, send confirm_rkey msg to register with peer */
static int smc_reg_rmb(struct smc_link *link, struct smc_buf_desc *rmb_desc,
		       bool conf_rkey)
{
	if (!rmb_desc->wr_reg) {
		/* register memory region for new rmb */
		if (smc_wr_reg_send(link, rmb_desc->mr_rx[SMC_SINGLE_LINK])) {
			rmb_desc->regerr = 1;
			return -EFAULT;
		}
		rmb_desc->wr_reg = 1;
	}
	if (!conf_rkey)
		return 0;
	/* exchange confirm_rkey msg with peer */
	if (smc_llc_do_confirm_rkey(link, rmb_desc)) {
		rmb_desc->regerr = 1;
		return -EFAULT;
	}
	return 0;
}
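/* Registration happens once per rmb (guarded by wr_reg), whereas the rkey
 * is only confirmed with the peer when the caller asks for it via
 * conf_rkey; both failure cases are remembered in regerr so the buffer is
 * not handed out again.
 */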
static int smc_clnt_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	/* receive CONFIRM LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}

	if (link->llc_confirm_rc)
		return SMC_CLC_DECL_RMBE_EC;

	rc = smc_ib_modify_qp_rts(link);
	if (rc)
		return SMC_CLC_DECL_ERR_RDYLNK;

	smc_wr_remember_qp_attr(link);

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive ADD LINK request from server over RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
	}

	/* send add link reject message, only one link supported for now */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_RESP);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}
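/* Client side of the single-link LLC handshake on first contact: receive
 * CONFIRM LINK, bring the QP to RTS, register the rmb, respond to CONFIRM
 * LINK, and answer the server's ADD LINK with a reject since only one link
 * is supported so far (see "Initial restrictions" above).
 */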
static void smcr_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->rmbe_size);

	smc->conn.peer_rmbe_idx = clc->rmbe_idx;
	smc->conn.local_tx_ctrl.token = ntohl(clc->rmbe_alert_token);
	smc->conn.peer_rmbe_size = bufsize;
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * (smc->conn.peer_rmbe_idx - 1);
}

static void smcd_conn_save_peer_info(struct smc_sock *smc,
				     struct smc_clc_msg_accept_confirm *clc)
{
	int bufsize = smc_uncompress_bufsize(clc->dmbe_size);

	smc->conn.peer_rmbe_idx = clc->dmbe_idx;
	smc->conn.peer_token = clc->token;
	/* msg header takes up space in the buffer */
	smc->conn.peer_rmbe_size = bufsize - sizeof(struct smcd_cdc_msg);
	atomic_set(&smc->conn.peer_rmbe_space, smc->conn.peer_rmbe_size);
	smc->conn.tx_off = bufsize * smc->conn.peer_rmbe_idx;
}
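/* Note the asymmetry: SMC-R computes tx_off with a 1-based peer_rmbe_idx,
 * while SMC-D uses the dmbe index directly and additionally reserves room
 * for the CDC message header inside the peer buffer.
 */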
static void smc_conn_save_peer_info(struct smc_sock *smc,
				    struct smc_clc_msg_accept_confirm *clc)
{
	if (smc->conn.lgr->is_smcd)
		smcd_conn_save_peer_info(smc, clc);
	else
		smcr_conn_save_peer_info(smc, clc);
}

static void smc_link_save_peer_info(struct smc_link *link,
				    struct smc_clc_msg_accept_confirm *clc)
{
	link->peer_qpn = ntoh24(clc->qpn);
	memcpy(link->peer_gid, clc->lcl.gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, clc->lcl.mac, sizeof(link->peer_mac));
	link->peer_psn = ntoh24(clc->psn);
	link->peer_mtu = clc->qp_mtu;
}
static void smc_switch_to_fallback(struct smc_sock *smc)
{
	smc->use_fallback = true;
	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
		smc->clcsock->file = smc->sk.sk_socket->file;
		smc->clcsock->file->private_data = smc->clcsock;
	}
}
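/* After fallback the struct file that user space already holds is pointed
 * at the internal TCP clcsock, so all subsequent socket calls are served by
 * plain TCP without the application noticing the switch.
 */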
/* fall back during connect */
static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
{
	smc_switch_to_fallback(smc);
	smc->fallback_rsn = reason_code;
	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;
	return 0;
}
/* decline and fall back during connect */
static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
{
	int rc;

	if (reason_code < 0) { /* error, fallback is not possible */
		if (smc->sk.sk_state == SMC_INIT)
			sock_put(&smc->sk); /* passive closing */
		return reason_code;
	}
	if (reason_code != SMC_CLC_DECL_PEERDECL) {
		rc = smc_clc_send_decline(smc, reason_code);
		if (rc < 0) {
			if (smc->sk.sk_state == SMC_INIT)
				sock_put(&smc->sk); /* passive closing */
			return rc;
		}
	}
	return smc_connect_fallback(smc, reason_code);
}
/* abort connecting */
static int smc_connect_abort(struct smc_sock *smc, int reason_code,
			     int local_contact)
{
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(smc->conn.lgr);
	if (smc->conn.lgr->is_smcd)
		/* there is only one lgr role for SMC-D; use server lock */
		mutex_unlock(&smc_server_lgr_pending);
	else
		mutex_unlock(&smc_client_lgr_pending);

	smc_conn_free(&smc->conn);
	smc->connect_nonblock = 0;
	return reason_code;
}
/* check if there is a rdma device available for this connection. */
/* called for connect and listen */
static int smc_find_rdma_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* PNET table look up: search active ib_device and port
	 * within same PNETID that also contains the ethernet device
	 * used for the internal TCP socket
	 */
	smc_pnet_find_roce_resource(smc->clcsock->sk, ini);
	if (!ini->ib_dev)
		return SMC_CLC_DECL_NOSMCRDEV;
	return 0;
}

/* check if there is an ISM device available for this connection. */
/* called for connect and listen */
static int smc_find_ism_device(struct smc_sock *smc, struct smc_init_info *ini)
{
	/* Find ISM device with same PNETID as connecting interface */
	smc_pnet_find_ism_resource(smc->clcsock->sk, ini);
	if (!ini->ism_dev)
		return SMC_CLC_DECL_NOSMCDDEV;
	return 0;
}
/* Check for VLAN ID and register it on ISM device just for CLC handshake */
static int smc_connect_ism_vlan_setup(struct smc_sock *smc,
				      struct smc_init_info *ini)
{
	if (ini->vlan_id && smc_ism_get_vlan(ini->ism_dev, ini->vlan_id))
		return SMC_CLC_DECL_ISMVLANERR;
	return 0;
}

/* cleanup temporary VLAN ID registration used for CLC handshake. If ISM is
 * used, the VLAN ID will be registered again during the connection setup.
 */
static int smc_connect_ism_vlan_cleanup(struct smc_sock *smc, bool is_smcd,
					struct smc_init_info *ini)
{
	if (!is_smcd)
		return 0;
	if (ini->vlan_id && smc_ism_put_vlan(ini->ism_dev, ini->vlan_id))
		return SMC_CLC_DECL_CNFERR;
	return 0;
}
/* CLC handshake during connect */
static int smc_connect_clc(struct smc_sock *smc, int smc_type,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	/* do inband token exchange */
	rc = smc_clc_send_proposal(smc, smc_type, ini);
	if (rc)
		return rc;
	/* receive SMC Accept CLC message */
	return smc_clc_wait_msg(smc, aclc, sizeof(*aclc), SMC_CLC_ACCEPT,
				CLC_WAIT_TIME);
}
/* setup for RDMA connection of client */
static int smc_connect_rdma(struct smc_sock *smc,
			    struct smc_clc_msg_accept_confirm *aclc,
			    struct smc_init_info *ini)
{
	struct smc_link *link;
	int reason_code = 0;

	ini->is_smcd = false;
	ini->ib_lcl = &aclc->lcl;
	ini->ib_clcqpn = ntoh24(aclc->qpn);
	ini->srv_first_contact = aclc->hdr.flag;

	mutex_lock(&smc_client_lgr_pending);
	reason_code = smc_conn_create(smc, ini);
	if (reason_code) {
		mutex_unlock(&smc_client_lgr_pending);
		return reason_code;
	}
	link = &smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	smc_conn_save_peer_info(smc, aclc);

	/* create send buffer and rmb */
	if (smc_buf_create(smc, false))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
					 ini->cln_first_contact);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, aclc);

	if (smc_rmb_rtoken_handling(&smc->conn, aclc))
		return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RTOK,
					 ini->cln_first_contact);

	smc_close_init(smc);
	smc_rx_init(smc);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_RDYLNK,
						 ini->cln_first_contact);
	} else {
		if (smc_reg_rmb(link, smc->conn.rmb_desc, true))
			return smc_connect_abort(smc, SMC_CLC_DECL_ERR_REGRMB,
						 ini->cln_first_contact);
	}
	smc_rmb_sync_sg_for_device(&smc->conn);

	reason_code = smc_clc_send_confirm(smc);
	if (reason_code)
		return smc_connect_abort(smc, reason_code,
					 ini->cln_first_contact);

	smc_tx_init(smc);

	if (ini->cln_first_contact == SMC_FIRST_CONTACT) {
		/* QP confirmation over RoCE fabric */
		reason_code = smc_clnt_conf_first_link(smc);
		if (reason_code)
			return smc_connect_abort(smc, reason_code,
						 ini->cln_first_contact);
	}
	mutex_unlock(&smc_client_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}
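/* smc_client_lgr_pending is held from link group lookup/creation until (on
 * first contact) QP confirmation has finished, so a concurrent connect
 * cannot race with the setup of the same link group.
 */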
/* setup for ISM connection of client */
static int smc_connect_ism(struct smc_sock *smc,
			   struct smc_clc_msg_accept_confirm *aclc,
			   struct smc_init_info *ini)
{
	int rc = 0;

	ini->is_smcd = true;
	ini->ism_gid = aclc->gid;
	ini->srv_first_contact = aclc->hdr.flag;

	/* there is only one lgr role for SMC-D; use server lock */
	mutex_lock(&smc_server_lgr_pending);
	rc = smc_conn_create(smc, ini);
	if (rc) {
		mutex_unlock(&smc_server_lgr_pending);
		return rc;
	}

	/* Create send and receive buffers */
	if (smc_buf_create(smc, true))
		return smc_connect_abort(smc, SMC_CLC_DECL_MEM,
					 ini->cln_first_contact);

	smc_conn_save_peer_info(smc, aclc);
	smc_close_init(smc);
	smc_rx_init(smc);
	smc_tx_init(smc);

	rc = smc_clc_send_confirm(smc);
	if (rc)
		return smc_connect_abort(smc, rc, ini->cln_first_contact);
	mutex_unlock(&smc_server_lgr_pending);

	smc_copy_sock_settings_to_clc(smc);
	smc->connect_nonblock = 0;
	if (smc->sk.sk_state == SMC_INIT)
		smc->sk.sk_state = SMC_ACTIVE;

	return 0;
}
/* perform steps before actually connecting */
static int __smc_connect(struct smc_sock *smc)
{
	bool ism_supported = false, rdma_supported = false;
	struct smc_clc_msg_accept_confirm aclc;
	struct smc_init_info ini = {0};
	int smc_type;
	int rc = 0;

	sock_hold(&smc->sk); /* sock put in passive closing */

	if (smc->use_fallback)
		return smc_connect_fallback(smc, smc->fallback_rsn);

	/* if peer has not signalled SMC-capability, fall back */
	if (!tcp_sk(smc->clcsock->sk)->syn_smc)
		return smc_connect_fallback(smc, SMC_CLC_DECL_PEERNOSMC);

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(smc))
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_IPSEC);

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(smc->clcsock, &ini))
		return smc_connect_decline_fallback(smc,
						    SMC_CLC_DECL_GETVLANERR);

	/* check if there is an ism device available */
	if (!smc_find_ism_device(smc, &ini) &&
	    !smc_connect_ism_vlan_setup(smc, &ini)) {
		/* ISM is supported for this connection */
		ism_supported = true;
		smc_type = SMC_TYPE_D;
	}

	/* check if there is a rdma device available */
	if (!smc_find_rdma_device(smc, &ini)) {
		/* RDMA is supported for this connection */
		rdma_supported = true;
		if (ism_supported)
			smc_type = SMC_TYPE_B; /* both */
		else
			smc_type = SMC_TYPE_R; /* only RDMA */
	}

	/* if neither ISM nor RDMA are supported, fallback */
	if (!rdma_supported && !ism_supported)
		return smc_connect_decline_fallback(smc, SMC_CLC_DECL_NOSMCDEV);

	/* perform CLC handshake */
	rc = smc_connect_clc(smc, smc_type, &aclc, &ini);
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
		return smc_connect_decline_fallback(smc, rc);
	}

	/* depending on previous steps, connect using rdma or ism */
	if (rdma_supported && aclc.hdr.path == SMC_TYPE_R)
		rc = smc_connect_rdma(smc, &aclc, &ini);
	else if (ism_supported && aclc.hdr.path == SMC_TYPE_D)
		rc = smc_connect_ism(smc, &aclc, &ini);
	else
		rc = SMC_CLC_DECL_MODEUNSUPP;
	if (rc) {
		smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
		return smc_connect_decline_fallback(smc, rc);
	}

	smc_connect_ism_vlan_cleanup(smc, ism_supported, &ini);
	return 0;
}
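/* The proposal advertises SMC-D, SMC-R or both (smc_type D/R/B) depending
 * on the devices found above; the server's accept (aclc.hdr.path) selects
 * the mode actually used, and any failure on this path degrades into a CLC
 * decline plus TCP fallback instead of a hard connect error.
 */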
static void smc_connect_work(struct work_struct *work)
{
	struct smc_sock *smc = container_of(work, struct smc_sock,
					    connect_work);
	long timeo = smc->sk.sk_sndtimeo;
	int rc = 0;

	if (!timeo)
		timeo = MAX_SCHEDULE_TIMEOUT;
	lock_sock(smc->clcsock->sk);
	if (smc->clcsock->sk->sk_err) {
		smc->sk.sk_err = smc->clcsock->sk->sk_err;
	} else if ((1 << smc->clcsock->sk->sk_state) &
					(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		rc = sk_stream_wait_connect(smc->clcsock->sk, &timeo);
		if ((rc == -EPIPE) &&
		    ((1 << smc->clcsock->sk->sk_state) &
		     (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)))
			rc = 0;
	}
	release_sock(smc->clcsock->sk);
	lock_sock(&smc->sk);
	if (rc != 0 || smc->sk.sk_err) {
		smc->sk.sk_state = SMC_CLOSED;
		if (rc == -EPIPE || rc == -EAGAIN)
			smc->sk.sk_err = EPIPE;
		else if (signal_pending(current))
			smc->sk.sk_err = -sock_intr_errno(timeo);
		goto out;
	}

	rc = __smc_connect(smc);
	if (rc < 0)
		smc->sk.sk_err = -rc;

out:
	if (!sock_flag(&smc->sk, SOCK_DEAD)) {
		if (smc->sk.sk_err) {
			smc->sk.sk_state_change(&smc->sk);
		} else { /* allow polling before and after fallback decision */
			smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
			smc->sk.sk_write_space(&smc->sk);
		}
	}
	release_sock(&smc->sk);
}
static int smc_connect(struct socket *sock, struct sockaddr *addr,
		       int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;

	smc = smc_sk(sk);

	/* separate smc parameter checking to be safe */
	if (alen < sizeof(addr->sa_family))
		goto out_err;
	if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6)
		goto out_err;

	lock_sock(sk);
	switch (sk->sk_state) {
	default:
		goto out;
	case SMC_ACTIVE:
		rc = -EISCONN;
		goto out;
	case SMC_INIT:
		rc = 0;
		break;
	}

	smc_copy_sock_settings_to_clc(smc);
	tcp_sk(smc->clcsock->sk)->syn_smc = 1;
	if (smc->connect_nonblock) {
		rc = -EALREADY;
		goto out;
	}
	rc = kernel_connect(smc->clcsock, addr, alen, flags);
	if (rc && rc != -EINPROGRESS)
		goto out;
	if (flags & O_NONBLOCK) {
		if (schedule_work(&smc->connect_work))
			smc->connect_nonblock = 1;
		rc = -EINPROGRESS;
	} else {
		rc = __smc_connect(smc);
		if (rc < 0)
			goto out;
		else
			rc = 0; /* success cases including fallback */
	}

out:
	release_sock(sk);
out_err:
	return rc;
}
static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
{
	struct socket *new_clcsock = NULL;
	struct sock *lsk = &lsmc->sk;
	struct sock *new_sk;
	int rc = -EINVAL;

	release_sock(lsk);
	new_sk = smc_sock_alloc(sock_net(lsk), NULL, lsk->sk_protocol);
	if (!new_sk) {
		rc = -ENOMEM;
		lsk->sk_err = ENOMEM;
		*new_smc = NULL;
		lock_sock(lsk);
		goto out;
	}
	*new_smc = smc_sk(new_sk);

	mutex_lock(&lsmc->clcsock_release_lock);
	if (lsmc->clcsock)
		rc = kernel_accept(lsmc->clcsock, &new_clcsock, 0);
	mutex_unlock(&lsmc->clcsock_release_lock);
	lock_sock(lsk);
	if (rc < 0)
		lsk->sk_err = -rc;
	if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
		new_sk->sk_prot->unhash(new_sk);
		if (new_clcsock)
			sock_release(new_clcsock);
		new_sk->sk_state = SMC_CLOSED;
		sock_set_flag(new_sk, SOCK_DEAD);
		sock_put(new_sk); /* final */
		*new_smc = NULL;
		goto out;
	}

	(*new_smc)->clcsock = new_clcsock;
out:
	return rc;
}
/* add a just created sock to the accept queue of the listen sock as
 * candidate for a following socket accept call from user space
 */
static void smc_accept_enqueue(struct sock *parent, struct sock *sk)
{
	struct smc_sock *par = smc_sk(parent);

	sock_hold(sk); /* sock_put in smc_accept_unlink () */
	spin_lock(&par->accept_q_lock);
	list_add_tail(&smc_sk(sk)->accept_q, &par->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_added(parent);
}

/* remove a socket from the accept queue of its parental listening socket */
static void smc_accept_unlink(struct sock *sk)
{
	struct smc_sock *par = smc_sk(sk)->listen_smc;

	spin_lock(&par->accept_q_lock);
	list_del_init(&smc_sk(sk)->accept_q);
	spin_unlock(&par->accept_q_lock);
	sk_acceptq_removed(&smc_sk(sk)->listen_smc->sk);
	sock_put(sk); /* sock_hold in smc_accept_enqueue */
}
/* remove a sock from the accept queue to bind it to a new socket created
 * for a socket accept call from user space
 */
struct sock *smc_accept_dequeue(struct sock *parent,
				struct socket *new_sock)
{
	struct smc_sock *isk, *n;
	struct sock *new_sk;

	list_for_each_entry_safe(isk, n, &smc_sk(parent)->accept_q, accept_q) {
		new_sk = (struct sock *)isk;

		smc_accept_unlink(new_sk);
		if (new_sk->sk_state == SMC_CLOSED) {
			new_sk->sk_prot->unhash(new_sk);
			if (isk->clcsock) {
				sock_release(isk->clcsock);
				isk->clcsock = NULL;
			}
			sock_put(new_sk); /* final */
			continue;
		}
		if (new_sock) {
			sock_graft(new_sk, new_sock);
			if (isk->use_fallback) {
				smc_sk(new_sk)->clcsock->file = new_sock->file;
				isk->clcsock->file->private_data = isk->clcsock;
			}
		}
		return new_sk;
	}
	return NULL;
}
/* clean up for a created but never accepted sock */
void smc_close_non_accepted(struct sock *sk)
{
	struct smc_sock *smc = smc_sk(sk);

	lock_sock(sk);
	if (!sk->sk_lingertime)
		/* wait for peer closing */
		sk->sk_lingertime = SMC_MAX_STREAM_WAIT_TIMEOUT;
	if (!smc->use_fallback) {
		smc_close_active(smc);
		sock_set_flag(sk, SOCK_DEAD);
		sk->sk_shutdown |= SHUTDOWN_MASK;
	}
	sk->sk_prot->unhash(sk);
	if (smc->clcsock) {
		struct socket *tcp;

		tcp = smc->clcsock;
		smc->clcsock = NULL;
		sock_release(tcp);
	}
	if (smc->use_fallback) {
		sock_put(sk); /* passive closing */
		sk->sk_state = SMC_CLOSED;
	} else {
		if (sk->sk_state == SMC_CLOSED)
			smc_conn_free(&smc->conn);
	}
	release_sock(sk);
	sock_put(sk); /* final sock_put */
}
static int smc_serv_conf_first_link(struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);
	struct smc_link_group *lgr = smc->conn.lgr;
	struct smc_link *link;
	int rest;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];

	if (smc_reg_rmb(link, smc->conn.rmb_desc, false))
		return SMC_CLC_DECL_ERR_REGRMB;

	/* send CONFIRM LINK request to client over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_CL;

	/* receive CONFIRM LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(
		&link->llc_confirm_resp,
		SMC_LLC_WAIT_FIRST_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_CL : rc;
	}

	if (link->llc_confirm_resp_rc)
		return SMC_CLC_DECL_RMBE_EC;

	/* send ADD LINK request to client over the RoCE fabric */
	rc = smc_llc_send_add_link(link,
				   link->smcibdev->mac[link->ibport - 1],
				   link->gid, SMC_LLC_REQ);
	if (rc < 0)
		return SMC_CLC_DECL_TIMEOUT_AL;

	/* receive ADD LINK response from client over the RoCE fabric */
	rest = wait_for_completion_interruptible_timeout(&link->llc_add_resp,
							 SMC_LLC_WAIT_TIME);
	if (rest <= 0) {
		struct smc_clc_msg_decline dclc;

		rc = smc_clc_wait_msg(smc, &dclc, sizeof(dclc),
				      SMC_CLC_DECLINE, CLC_WAIT_TIME_SHORT);
		return rc == -EAGAIN ? SMC_CLC_DECL_TIMEOUT_AL : rc;
	}

	smc_llc_link_active(link, net->ipv4.sysctl_tcp_keepalive_time);

	return 0;
}
/* listen worker: finish */
static void smc_listen_out(struct smc_sock *new_smc)
{
	struct smc_sock *lsmc = new_smc->listen_smc;
	struct sock *newsmcsk = &new_smc->sk;

	if (lsmc->sk.sk_state == SMC_LISTEN) {
		lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
		smc_accept_enqueue(&lsmc->sk, newsmcsk);
		release_sock(&lsmc->sk);
	} else { /* no longer listening */
		smc_close_non_accepted(newsmcsk);
	}

	/* Wake up accept */
	lsmc->sk.sk_data_ready(&lsmc->sk);
	sock_put(&lsmc->sk); /* sock_hold in smc_tcp_listen_work */
}
/* listen worker: finish in state connected */
static void smc_listen_out_connected(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	sk_refcnt_debug_inc(newsmcsk);
	if (newsmcsk->sk_state == SMC_INIT)
		newsmcsk->sk_state = SMC_ACTIVE;

	smc_listen_out(new_smc);
}

/* listen worker: finish in error state */
static void smc_listen_out_err(struct smc_sock *new_smc)
{
	struct sock *newsmcsk = &new_smc->sk;

	if (newsmcsk->sk_state == SMC_INIT)
		sock_put(&new_smc->sk); /* passive closing */
	newsmcsk->sk_state = SMC_CLOSED;
	smc_conn_free(&new_smc->conn);

	smc_listen_out(new_smc);
}
/* listen worker: decline and fall back if possible */
static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
			       int local_contact)
{
	/* RDMA setup failed, switch back to TCP */
	if (local_contact == SMC_FIRST_CONTACT)
		smc_lgr_forget(new_smc->conn.lgr);
	if (reason_code < 0) { /* error, no fallback possible */
		smc_listen_out_err(new_smc);
		return;
	}
	smc_conn_free(&new_smc->conn);
	smc_switch_to_fallback(new_smc);
	new_smc->fallback_rsn = reason_code;
	if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
		if (smc_clc_send_decline(new_smc, reason_code) < 0) {
			smc_listen_out_err(new_smc);
			return;
		}
	}
	smc_listen_out_connected(new_smc);
}
/* listen worker: check prefixes */
static int smc_listen_prfx_check(struct smc_sock *new_smc,
				 struct smc_clc_msg_proposal *pclc)
{
	struct smc_clc_msg_proposal_prefix *pclc_prfx;
	struct socket *newclcsock = new_smc->clcsock;

	pclc_prfx = smc_clc_proposal_get_prefix(pclc);
	if (smc_clc_prfx_match(newclcsock, pclc_prfx))
		return SMC_CLC_DECL_DIFFPREFIX;

	return 0;
}
/* listen worker: initialize connection and buffers */
static int smc_listen_rdma_init(struct smc_sock *new_smc,
				struct smc_init_info *ini)
{
	int rc;

	/* allocate connection / link group */
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* create send buffer and rmb */
	if (smc_buf_create(new_smc, false))
		return SMC_CLC_DECL_MEM;

	return 0;
}
/* listen worker: initialize connection and buffers for SMC-D */
static int smc_listen_ism_init(struct smc_sock *new_smc,
			       struct smc_clc_msg_proposal *pclc,
			       struct smc_init_info *ini)
{
	struct smc_clc_msg_smcd *pclc_smcd;
	int rc;

	pclc_smcd = smc_get_clc_msg_smcd(pclc);
	ini->ism_gid = pclc_smcd->gid;
	rc = smc_conn_create(new_smc, ini);
	if (rc)
		return rc;

	/* Check if peer can be reached via ISM device */
	if (smc_ism_cantalk(new_smc->conn.lgr->peer_gid,
			    new_smc->conn.lgr->vlan_id,
			    new_smc->conn.lgr->smcd)) {
		if (ini->cln_first_contact == SMC_FIRST_CONTACT)
			smc_lgr_forget(new_smc->conn.lgr);
		smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_SMCDNOTALK;
	}

	/* Create send and receive buffers */
	if (smc_buf_create(new_smc, true)) {
		if (ini->cln_first_contact == SMC_FIRST_CONTACT)
			smc_lgr_forget(new_smc->conn.lgr);
		smc_conn_free(&new_smc->conn);
		return SMC_CLC_DECL_MEM;
	}

	return 0;
}
/* listen worker: register buffers */
static int smc_listen_rdma_reg(struct smc_sock *new_smc, int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];

	if (local_contact != SMC_FIRST_CONTACT) {
		if (smc_reg_rmb(link, new_smc->conn.rmb_desc, true))
			return SMC_CLC_DECL_ERR_REGRMB;
	}
	smc_rmb_sync_sg_for_device(&new_smc->conn);

	return 0;
}
/* listen worker: finish RDMA setup */
static int smc_listen_rdma_finish(struct smc_sock *new_smc,
				  struct smc_clc_msg_accept_confirm *cclc,
				  int local_contact)
{
	struct smc_link *link = &new_smc->conn.lgr->lnk[SMC_SINGLE_LINK];
	int reason_code = 0;

	if (local_contact == SMC_FIRST_CONTACT)
		smc_link_save_peer_info(link, cclc);

	if (smc_rmb_rtoken_handling(&new_smc->conn, cclc)) {
		reason_code = SMC_CLC_DECL_ERR_RTOK;
		goto decline;
	}

	if (local_contact == SMC_FIRST_CONTACT) {
		if (smc_ib_ready_link(link)) {
			reason_code = SMC_CLC_DECL_ERR_RDYLNK;
			goto decline;
		}
		/* QP confirmation over RoCE fabric */
		reason_code = smc_serv_conf_first_link(new_smc);
		if (reason_code)
			goto decline;
	}
	return 0;

decline:
	smc_listen_decline(new_smc, reason_code, local_contact);
	return reason_code;
}
/* setup for RDMA connection of server */
static void smc_listen_work(struct work_struct *work)
{
	struct smc_sock *new_smc = container_of(work, struct smc_sock,
						smc_listen_work);
	struct socket *newclcsock = new_smc->clcsock;
	struct smc_clc_msg_accept_confirm cclc;
	struct smc_clc_msg_proposal *pclc;
	struct smc_init_info ini = {0};
	bool ism_supported = false;
	u8 buf[SMC_CLC_MAX_LEN];
	int rc = 0;

	if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
		return smc_listen_out_err(new_smc);

	if (new_smc->use_fallback) {
		smc_listen_out_connected(new_smc);
		return;
	}

	/* check if peer is smc capable */
	if (!tcp_sk(newclcsock->sk)->syn_smc) {
		smc_switch_to_fallback(new_smc);
		new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
		smc_listen_out_connected(new_smc);
		return;
	}

	/* do inband token exchange -
	 * wait for and receive SMC Proposal CLC message
	 */
	pclc = (struct smc_clc_msg_proposal *)&buf;
	rc = smc_clc_wait_msg(new_smc, pclc, SMC_CLC_MAX_LEN,
			      SMC_CLC_PROPOSAL, CLC_WAIT_TIME);
	if (rc)
		goto out_decl;

	/* IPSec connections opt out of SMC-R optimizations */
	if (using_ipsec(new_smc)) {
		rc = SMC_CLC_DECL_IPSEC;
		goto out_decl;
	}

	/* check for matching IP prefix and subnet length */
	rc = smc_listen_prfx_check(new_smc, pclc);
	if (rc)
		goto out_decl;

	/* get vlan id from IP device */
	if (smc_vlan_by_tcpsk(new_smc->clcsock, &ini)) {
		rc = SMC_CLC_DECL_GETVLANERR;
		goto out_decl;
	}

	mutex_lock(&smc_server_lgr_pending);
	smc_close_init(new_smc);
	smc_rx_init(new_smc);
	smc_tx_init(new_smc);

	/* check if ISM is available */
	if (pclc->hdr.path == SMC_TYPE_D || pclc->hdr.path == SMC_TYPE_B) {
		ini.is_smcd = true; /* prepare ISM check */
		rc = smc_find_ism_device(new_smc, &ini);
		if (!rc)
			rc = smc_listen_ism_init(new_smc, pclc, &ini);
		if (!rc)
			ism_supported = true;
		else if (pclc->hdr.path == SMC_TYPE_D)
			goto out_unlock; /* skip RDMA and decline */
	}

	/* check if RDMA is available */
	if (!ism_supported) { /* SMC_TYPE_R or SMC_TYPE_B */
		/* prepare RDMA check */
		memset(&ini, 0, sizeof(ini));
		ini.is_smcd = false;
		ini.ib_lcl = &pclc->lcl;
		rc = smc_find_rdma_device(new_smc, &ini);
		if (rc) {
			/* no RDMA device found */
			if (pclc->hdr.path == SMC_TYPE_B)
				/* neither ISM nor RDMA device found */
				rc = SMC_CLC_DECL_NOSMCDEV;
			goto out_unlock;
		}
		rc = smc_listen_rdma_init(new_smc, &ini);
		if (rc)
			goto out_unlock;
		rc = smc_listen_rdma_reg(new_smc, ini.cln_first_contact);
		if (rc)
			goto out_unlock;
	}

	/* send SMC Accept CLC message */
	rc = smc_clc_send_accept(new_smc, ini.cln_first_contact);
	if (rc)
		goto out_unlock;

	/* SMC-D does not need this lock any more */
	if (ism_supported)
		mutex_unlock(&smc_server_lgr_pending);

	/* receive SMC Confirm CLC message */
	rc = smc_clc_wait_msg(new_smc, &cclc, sizeof(cclc),
			      SMC_CLC_CONFIRM, CLC_WAIT_TIME);
	if (rc) {
		if (!ism_supported)
			goto out_unlock;
		goto out_decl;
	}

	/* finish worker */
	if (!ism_supported) {
		rc = smc_listen_rdma_finish(new_smc, &cclc,
					    ini.cln_first_contact);
		mutex_unlock(&smc_server_lgr_pending);
		if (rc)
			return;
	}
	smc_conn_save_peer_info(new_smc, &cclc);
	smc_listen_out_connected(new_smc);
	return;

out_unlock:
	mutex_unlock(&smc_server_lgr_pending);
out_decl:
	smc_listen_decline(new_smc, rc, ini.cln_first_contact);
}
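/* The listen worker mirrors __smc_connect() on the server side: proposal
 * checks, device search (ISM first, then RDMA), CLC accept/confirm
 * exchange, and the LLC link confirmation for first-contact SMC-R
 * connections; every failure funnels into smc_listen_decline() and thus
 * into TCP fallback where the reason code permits it.
 */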
static void smc_tcp_listen_work(struct work_struct *work)
{
	struct smc_sock *lsmc = container_of(work, struct smc_sock,
					     tcp_listen_work);
	struct sock *lsk = &lsmc->sk;
	struct smc_sock *new_smc;
	int rc = 0;

	lock_sock(lsk);
	while (lsk->sk_state == SMC_LISTEN) {
		rc = smc_clcsock_accept(lsmc, &new_smc);
		if (rc)
			goto out;
		if (!new_smc)
			continue;

		new_smc->listen_smc = lsmc;
		new_smc->use_fallback = lsmc->use_fallback;
		new_smc->fallback_rsn = lsmc->fallback_rsn;
		sock_hold(lsk); /* sock_put in smc_listen_work */
		INIT_WORK(&new_smc->smc_listen_work, smc_listen_work);
		smc_copy_sock_settings_to_smc(new_smc);
		new_smc->sk.sk_sndbuf = lsmc->sk.sk_sndbuf;
		new_smc->sk.sk_rcvbuf = lsmc->sk.sk_rcvbuf;
		sock_hold(&new_smc->sk); /* sock_put in passive closing */
		if (!schedule_work(&new_smc->smc_listen_work))
			sock_put(&new_smc->sk);
	}

out:
	release_sock(lsk);
	sock_put(&lsmc->sk); /* sock_hold in smc_listen */
}
static int smc_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc;

	smc = smc_sk(sk);
	lock_sock(sk);

	rc = -EINVAL;
	if ((sk->sk_state != SMC_INIT) && (sk->sk_state != SMC_LISTEN))
		goto out;

	rc = 0;
	if (sk->sk_state == SMC_LISTEN) {
		sk->sk_max_ack_backlog = backlog;
		goto out;
	}
	/* some socket options are handled in core, so we could not apply
	 * them to the clc socket -- copy smc socket options to clc socket
	 */
	smc_copy_sock_settings_to_clc(smc);
	if (!smc->use_fallback)
		tcp_sk(smc->clcsock->sk)->syn_smc = 1;

	rc = kernel_listen(smc->clcsock, backlog);
	if (rc)
		goto out;
	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = SMC_LISTEN;
	sock_hold(sk); /* sock_hold in tcp_listen_worker */
	if (!schedule_work(&smc->tcp_listen_work))
		sock_put(sk);

out:
	release_sock(sk);
	return rc;
}
static int smc_accept(struct socket *sock, struct socket *new_sock,
		      int flags, bool kern)
{
	struct sock *sk = sock->sk, *nsk;
	DECLARE_WAITQUEUE(wait, current);
	struct smc_sock *lsmc;
	long timeo;
	int rc = 0;

	lsmc = smc_sk(sk);
	sock_hold(sk); /* sock_put below */
	lock_sock(sk);

	if (lsmc->sk.sk_state != SMC_LISTEN) {
		rc = -EINVAL;
		release_sock(sk);
		goto out;
	}

	/* Wait for an incoming connection */
	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = smc_accept_dequeue(sk, new_sock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			rc = -EAGAIN;
			break;
		}
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		/* wakeup by sk_data_ready in smc_listen_work() */
		sched_annotate_sleep();
		lock_sock(sk);
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (!rc)
		rc = sock_error(nsk);
	release_sock(sk);
	if (rc)
		goto out;

	if (lsmc->sockopt_defer_accept && !(flags & O_NONBLOCK)) {
		/* wait till data arrives on the socket */
		timeo = msecs_to_jiffies(lsmc->sockopt_defer_accept *
								MSEC_PER_SEC);
		if (smc_sk(nsk)->use_fallback) {
			struct sock *clcsk = smc_sk(nsk)->clcsock->sk;

			lock_sock(clcsk);
			if (skb_queue_empty(&clcsk->sk_receive_queue))
				sk_wait_data(clcsk, &timeo, NULL);
			release_sock(clcsk);
		} else if (!atomic_read(&smc_sk(nsk)->conn.bytes_to_rcv)) {
			lock_sock(nsk);
			smc_rx_wait(smc_sk(nsk), &timeo, smc_rx_data_available);
			release_sock(nsk);
		}
	}

out:
	sock_put(sk); /* sock_hold above */
	return rc;
}
static int smc_getname(struct socket *sock, struct sockaddr *addr,
		       int peer)
{
	struct smc_sock *smc;

	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
	    (sock->sk->sk_state != SMC_APPCLOSEWAIT1))
		return -ENOTCONN;

	smc = smc_sk(sock->sk);

	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
}
static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_INIT))
		goto out;

	if (msg->msg_flags & MSG_FASTOPEN) {
		if (sk->sk_state == SMC_INIT) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			rc = -EINVAL;
			goto out;
		}
	}

	if (smc->use_fallback)
		rc = smc->clcsock->ops->sendmsg(smc->clcsock, msg, len);
	else
		rc = smc_tx_sendmsg(smc, msg, len);
out:
	release_sock(sk);
	return rc;
}
static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if ((sk->sk_state == SMC_INIT) ||
	    (sk->sk_state == SMC_LISTEN) ||
	    (sk->sk_state == SMC_CLOSED))
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->recvmsg(smc->clcsock, msg, len, flags);
	} else {
		msg->msg_namelen = 0;
		rc = smc_rx_recvmsg(smc, msg, NULL, len, flags);
	}

out:
	release_sock(sk);
	return rc;
}
static __poll_t smc_accept_poll(struct sock *parent)
{
	struct smc_sock *isk = smc_sk(parent);
	__poll_t mask = 0;

	spin_lock(&isk->accept_q_lock);
	if (!list_empty(&isk->accept_q))
		mask = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&isk->accept_q_lock);

	return mask;
}
static __poll_t smc_poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	__poll_t mask = 0;

	if (!sk)
		return EPOLLNVAL;

	smc = smc_sk(sock->sk);
	if (smc->use_fallback) {
		/* delegate to CLC child sock */
		mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
		sk->sk_err = smc->clcsock->sk->sk_err;
	} else {
		if (sk->sk_state != SMC_CLOSED)
			sock_poll_wait(file, sock, wait);
		if (sk->sk_err)
			mask |= EPOLLERR;
		if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
		    (sk->sk_state == SMC_CLOSED))
			mask |= EPOLLHUP;
		if (sk->sk_state == SMC_LISTEN) {
			/* woken up by sk_data_ready in smc_listen_work() */
			mask |= smc_accept_poll(sk);
		} else if (smc->use_fallback) { /* as result of connect_work()*/
			mask |= smc->clcsock->ops->poll(file, smc->clcsock,
							wait);
			sk->sk_err = smc->clcsock->sk->sk_err;
		} else {
			if ((sk->sk_state != SMC_INIT &&
			     atomic_read(&smc->conn.sndbuf_space)) ||
			    sk->sk_shutdown & SEND_SHUTDOWN) {
				mask |= EPOLLOUT | EPOLLWRNORM;
			} else {
				sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			}
			if (atomic_read(&smc->conn.bytes_to_rcv))
				mask |= EPOLLIN | EPOLLRDNORM;
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
			if (sk->sk_state == SMC_APPCLOSEWAIT1)
				mask |= EPOLLIN;
			if (smc->conn.urg_state == SMC_URG_VALID)
				mask |= EPOLLPRI;
		}
	}

	return mask;
}
static int smc_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EINVAL;
	int rc1 = 0;

	smc = smc_sk(sk);

	if ((how < SHUT_RD) || (how > SHUT_RDWR))
		return rc;

	lock_sock(sk);

	rc = -ENOTCONN;
	if ((sk->sk_state != SMC_ACTIVE) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT1) &&
	    (sk->sk_state != SMC_PEERCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT1) &&
	    (sk->sk_state != SMC_APPCLOSEWAIT2) &&
	    (sk->sk_state != SMC_APPFINCLOSEWAIT))
		goto out;
	if (smc->use_fallback) {
		rc = kernel_sock_shutdown(smc->clcsock, how);
		sk->sk_shutdown = smc->clcsock->sk->sk_shutdown;
		if (sk->sk_shutdown == SHUTDOWN_MASK)
			sk->sk_state = SMC_CLOSED;
		goto out;
	}
	switch (how) {
	case SHUT_RDWR:		/* shutdown in both directions */
		rc = smc_close_active(smc);
		break;
	case SHUT_WR:
		rc = smc_close_shutdown_write(smc);
		break;
	case SHUT_RD:
		rc = 0;
		/* nothing more to do because peer is not involved */
		break;
	}
	if (smc->clcsock)
		rc1 = kernel_sock_shutdown(smc->clcsock, how);
	/* map sock_shutdown_cmd constants to sk_shutdown value range */
	sk->sk_shutdown |= how + 1;

out:
	release_sock(sk);
	return rc ? rc : rc1;
}
static int smc_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int val, rc;

	smc = smc_sk(sk);

	/* generic setsockopts reaching us here always apply to the
	 * CLC socket
	 */
	rc = smc->clcsock->ops->setsockopt(smc->clcsock, level, optname,
					   optval, optlen);
	if (smc->clcsock->sk->sk_err) {
		sk->sk_err = smc->clcsock->sk->sk_err;
		sk->sk_error_report(sk);
	}
	if (rc)
		return rc;

	if (optlen < sizeof(int))
		return -EINVAL;
	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);
	switch (optname) {
	case TCP_ULP:
	case TCP_FASTOPEN:
	case TCP_FASTOPEN_CONNECT:
	case TCP_FASTOPEN_KEY:
	case TCP_FASTOPEN_NO_COOKIE:
		/* option not supported by SMC */
		if (sk->sk_state == SMC_INIT) {
			smc_switch_to_fallback(smc);
			smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
		} else {
			if (!smc->use_fallback)
				rc = -EINVAL;
		}
		break;
	case TCP_NODELAY:
		if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
			if (val && !smc->use_fallback)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_CORK:
		if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
			if (!val && !smc->use_fallback)
				mod_delayed_work(system_wq, &smc->conn.tx_work,
						 0);
		}
		break;
	case TCP_DEFER_ACCEPT:
		smc->sockopt_defer_accept = val;
		break;
	default:
		break;
	}
	release_sock(sk);

	return rc;
}
static int smc_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct smc_sock *smc;

	smc = smc_sk(sock->sk);
	/* socket options apply to the CLC socket */
	return smc->clcsock->ops->getsockopt(smc->clcsock, level, optname,
					     optval, optlen);
}
static int smc_ioctl(struct socket *sock, unsigned int cmd,
		     unsigned long arg)
{
	union smc_host_cursor cons, urg;
	struct smc_connection *conn;
	struct smc_sock *smc;
	int answ;

	smc = smc_sk(sock->sk);
	conn = &smc->conn;
	lock_sock(&smc->sk);
	if (smc->use_fallback) {
		if (!smc->clcsock) {
			release_sock(&smc->sk);
			return -EBADF;
		}
		answ = smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
		release_sock(&smc->sk);
		return answ;
	}
	switch (cmd) {
	case SIOCINQ: /* same as FIONREAD */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = atomic_read(&smc->conn.bytes_to_rcv);
		break;
	case SIOCOUTQ:
		/* output queue size (not send + not acked) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc->conn.sndbuf_desc->len -
					atomic_read(&smc->conn.sndbuf_space);
		break;
	case SIOCOUTQNSD:
		/* output queue size (not send only) */
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED)
			answ = 0;
		else
			answ = smc_tx_prepared_sends(&smc->conn);
		break;
	case SIOCATMARK:
		if (smc->sk.sk_state == SMC_LISTEN) {
			release_sock(&smc->sk);
			return -EINVAL;
		}
		if (smc->sk.sk_state == SMC_INIT ||
		    smc->sk.sk_state == SMC_CLOSED) {
			answ = 0;
		} else {
			smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
			smc_curs_copy(&urg, &conn->urg_curs, conn);
			answ = smc_curs_diff(conn->rmb_desc->len,
					     &cons, &urg) == 1;
		}
		break;
	default:
		release_sock(&smc->sk);
		return -ENOIOCTLCMD;
	}
	release_sock(&smc->sk);

	return put_user(answ, (int __user *)arg);
}
static ssize_t smc_sendpage(struct socket *sock, struct page *page,
			    int offset, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -EPIPE;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state != SMC_ACTIVE) {
		release_sock(sk);
		goto out;
	}
	release_sock(sk);
	if (smc->use_fallback)
		rc = kernel_sendpage(smc->clcsock, page, offset,
				     size, flags);
	else
		rc = sock_no_sendpage(sock, page, offset, size, flags);

out:
	return rc;
}
/* Map the affected portions of the rmbe into an spd, note the number of bytes
 * to splice in conn->splice_pending, and press 'go'. Delays consumer cursor
 * updates till whenever a respective page has been fully processed.
 * Note that subsequent recv() calls have to wait till all splice() processing
 * completed.
 */
static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos,
			       struct pipe_inode_info *pipe, size_t len,
			       unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct smc_sock *smc;
	int rc = -ENOTCONN;

	smc = smc_sk(sk);
	lock_sock(sk);
	if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* socket was connected before, no more data to read */
		rc = 0;
		goto out;
	}
	if (sk->sk_state == SMC_INIT ||
	    sk->sk_state == SMC_LISTEN ||
	    sk->sk_state == SMC_CLOSED)
		goto out;

	if (sk->sk_state == SMC_PEERFINCLOSEWAIT) {
		rc = 0;
		goto out;
	}

	if (smc->use_fallback) {
		rc = smc->clcsock->ops->splice_read(smc->clcsock, ppos,
						    pipe, len, flags);
	} else {
		if (*ppos) {
			rc = -ESPIPE;
			goto out;
		}
		if (flags & SPLICE_F_NONBLOCK)
			flags = MSG_DONTWAIT;
		else
			flags = 0;
		rc = smc_rx_recvmsg(smc, NULL, pipe, len, flags);
	}

out:
	release_sock(sk);

	return rc;
}
/* must look like tcp */
static const struct proto_ops smc_sock_ops = {
	.family		= PF_SMC,
	.owner		= THIS_MODULE,
	.release	= smc_release,
	.bind		= smc_bind,
	.connect	= smc_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= smc_accept,
	.getname	= smc_getname,
	.poll		= smc_poll,
	.ioctl		= smc_ioctl,
	.listen		= smc_listen,
	.shutdown	= smc_shutdown,
	.setsockopt	= smc_setsockopt,
	.getsockopt	= smc_getsockopt,
	.sendmsg	= smc_sendmsg,
	.recvmsg	= smc_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= smc_sendpage,
	.splice_read	= smc_splice_read,
};
static int smc_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	int family = (protocol == SMCPROTO_SMC6) ? PF_INET6 : PF_INET;
	struct smc_sock *smc;
	struct sock *sk;
	int rc;

	rc = -ESOCKTNOSUPPORT;
	if (sock->type != SOCK_STREAM)
		goto out;

	rc = -EPROTONOSUPPORT;
	if (protocol != SMCPROTO_SMC && protocol != SMCPROTO_SMC6)
		goto out;

	rc = -ENOBUFS;
	sock->ops = &smc_sock_ops;
	sk = smc_sock_alloc(net, sock, protocol);
	if (!sk)
		goto out;

	/* create internal TCP socket for CLC handshake and fallback */
	smc = smc_sk(sk);
	smc->use_fallback = false; /* assume rdma capability first */
	smc->fallback_rsn = 0;
	rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP,
			      &smc->clcsock);
	if (rc) {
		sk_common_release(sk);
		goto out;
	}
	smc->sk.sk_sndbuf = max(smc->clcsock->sk->sk_sndbuf, SMC_BUF_MIN_SIZE);
	smc->sk.sk_rcvbuf = max(smc->clcsock->sk->sk_rcvbuf, SMC_BUF_MIN_SIZE);

out:
	return rc;
}
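/* The buffer sizes chosen here are mirrored to accepted children in
 * smc_tcp_listen_work(), and smc_copy_sock_settings_to_clc() keeps the
 * clcsock consistent with the smc socket so a fallback connection behaves
 * like the TCP socket the application asked for.
 */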
static const struct net_proto_family smc_sock_family_ops = {
	.family	= PF_SMC,
	.owner	= THIS_MODULE,
	.create	= smc_create,
};

unsigned int smc_net_id;

static __net_init int smc_net_init(struct net *net)
{
	return smc_pnet_net_init(net);
}

static void __net_exit smc_net_exit(struct net *net)
{
	smc_pnet_net_exit(net);
}

static struct pernet_operations smc_net_ops = {
	.init = smc_net_init,
	.exit = smc_net_exit,
	.id   = &smc_net_id,
	.size = sizeof(struct smc_net),
};
static int __init smc_init(void)
{
	int rc;

	rc = register_pernet_subsys(&smc_net_ops);
	if (rc)
		return rc;

	rc = smc_pnet_init();
	if (rc)
		goto out_pernet_subsys;

	rc = smc_llc_init();
	if (rc) {
		pr_err("%s: smc_llc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = smc_cdc_init();
	if (rc) {
		pr_err("%s: smc_cdc_init fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto, 1);
	if (rc) {
		pr_err("%s: proto_register(v4) fails with %d\n", __func__, rc);
		goto out_pnet;
	}

	rc = proto_register(&smc_proto6, 1);
	if (rc) {
		pr_err("%s: proto_register(v6) fails with %d\n", __func__, rc);
		goto out_proto;
	}

	rc = sock_register(&smc_sock_family_ops);
	if (rc) {
		pr_err("%s: sock_register fails with %d\n", __func__, rc);
		goto out_proto6;
	}
	INIT_HLIST_HEAD(&smc_v4_hashinfo.ht);
	INIT_HLIST_HEAD(&smc_v6_hashinfo.ht);

	rc = smc_ib_register_client();
	if (rc) {
		pr_err("%s: ib_register fails with %d\n", __func__, rc);
		goto out_sock;
	}

	static_branch_enable(&tcp_have_smc);
	return 0;

out_sock:
	sock_unregister(PF_SMC);
out_proto6:
	proto_unregister(&smc_proto6);
out_proto:
	proto_unregister(&smc_proto);
out_pnet:
	smc_pnet_exit();
out_pernet_subsys:
	unregister_pernet_subsys(&smc_net_ops);

	return rc;
}
static void __exit smc_exit(void)
{
	smc_core_exit();
	static_branch_disable(&tcp_have_smc);
	smc_ib_unregister_client();
	sock_unregister(PF_SMC);
	proto_unregister(&smc_proto6);
	proto_unregister(&smc_proto);
	smc_pnet_exit();
	unregister_pernet_subsys(&smc_net_ops);
}
module_init(smc_init);
module_exit(smc_exit);

MODULE_DESCRIPTION("smc socket address family");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_SMC);