1 // SPDX-License-Identifier: GPL-2.0-only
2 /******************************************************************************
3 *******************************************************************************
5 ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
6 ** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
9 *******************************************************************************
10 ******************************************************************************/
/*
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level consists of two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
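 *
 * A minimal, purely illustrative sketch of how the layer above hands a
 * node's address to lowcomms before starting it up, using the
 * dlm_lowcomms_addr() and dlm_lowcomms_start() entry points defined below
 * (nodeid 2 and address 10.0.0.2 are made-up values):
 *
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = cpu_to_be32(0x0a000002),
 *	};
 *	struct sockaddr_storage ss;
 *
 *	memset(&ss, 0, sizeof(ss));
 *	memcpy(&ss, &sin, sizeof(sin));
 *	dlm_lowcomms_addr(2, &ss, sizeof(ss));
 *	dlm_lowcomms_start();
 */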
45 #include <asm/ioctls.h>
48 #include <linux/pagemap.h>
49 #include <linux/file.h>
50 #include <linux/mutex.h>
51 #include <linux/sctp.h>
52 #include <linux/slab.h>
53 #include <net/sctp/sctp.h>
56 #include <trace/events/dlm.h>
57 #include <trace/events/sock.h>
59 #include "dlm_internal.h"
65 #define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(5000)
66 #define NEEDED_RMEM (4*1024*1024)
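/* NEEDED_RMEM (4 MiB) is the receive buffer size requested with
 * sock_set_rcvbuf() on SCTP sockets in dlm_sctp_sockopts() below.
 */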
69 struct socket *sock; /* NULL if not connected */
70 uint32_t nodeid; /* So we know who we are in the list */
	/* this semaphore is used to allow parallel recv/send in read
	 * lock mode. When we release a sock we need to hold the write lock.
	 *
	 * However this locking is not nice. When we remove the
	 * othercon handling we can look into other mechanisms to synchronize
	 * io handling so that sock_release() is called at the right time.
	 */
78 struct rw_semaphore sock_lock;
80 #define CF_APP_LIMITED 0
81 #define CF_RECV_PENDING 1
82 #define CF_SEND_PENDING 2
83 #define CF_RECV_INTR 3
85 #define CF_IS_OTHERCON 5
86 struct list_head writequeue; /* List of outgoing writequeue_entries */
87 spinlock_t writequeue_lock;
89 struct hlist_node list;
	/* due to connect()/accept() races we can currently end up with this
	 * cross-over connection attempt, a second connection for one node.
	 *
	 * There is a solution to avoid the race by introducing a connect
	 * rule, e.g. only the side with our_nodeid > nodeid_to_connect is
	 * allowed to connect. The other side can still connect, but that is
	 * only taken to mean that it wants to reconnect.
	 *
	 * However changing to this behaviour would break backwards compatibility.
	 * In a DLM protocol major version upgrade we should remove this!
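	 *
	 * A sketch of such a rule; the helper name is hypothetical and
	 * nothing like it is implemented yet:
	 *
	 *	static bool dlm_may_initiate_connect(int nodeid)
	 *	{
	 *		return dlm_our_nodeid() > nodeid;
	 *	}
	 */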
101 struct connection *othercon;
102 struct work_struct rwork; /* receive worker */
103 struct work_struct swork; /* send worker */
104 wait_queue_head_t shutdown_wait;
105 unsigned char rx_leftover_buf[DLM_MAX_SOCKET_BUFSIZE];
110 struct sockaddr_storage addr[DLM_MAX_ADDR_COUNT];
111 spinlock_t addrs_lock;
114 #define sock2con(x) ((struct connection *)(x)->sk_user_data)
116 struct listen_connection {
118 struct work_struct rwork;
121 #define DLM_WQ_REMAIN_BYTES(e) (PAGE_SIZE - e->end)
122 #define DLM_WQ_LENGTH_BYTES(e) (e->end - e->offset)
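/* For a freshly allocated writequeue_entry, offset == end == 0, so
 * DLM_WQ_REMAIN_BYTES() is PAGE_SIZE and DLM_WQ_LENGTH_BYTES() is 0.
 * Reserving space in new_wq_entry() advances 'end', while completed
 * sends advance 'offset' in writequeue_entry_complete().
 */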
124 /* An entry waiting to be sent */
125 struct writequeue_entry {
126 struct list_head list;
133 struct connection *con;
134 struct list_head msgs;
139 struct writequeue_entry *entry;
140 struct dlm_msg *orig_msg;
144 int idx; /* new()/commit() idx exchange */
146 struct list_head list;
150 struct processqueue_entry {
155 struct list_head list;
158 struct dlm_proto_ops {
163 int (*connect)(struct connection *con, struct socket *sock,
164 struct sockaddr *addr, int addr_len);
165 void (*sockopts)(struct socket *sock);
166 int (*bind)(struct socket *sock);
167 int (*listen_validate)(void);
168 void (*listen_sockopts)(struct socket *sock);
169 int (*listen_bind)(struct socket *sock);
172 static struct listen_sock_callbacks {
173 void (*sk_error_report)(struct sock *);
174 void (*sk_data_ready)(struct sock *);
175 void (*sk_state_change)(struct sock *);
176 void (*sk_write_space)(struct sock *);
179 static struct listen_connection listen_con;
180 static struct sockaddr_storage dlm_local_addr[DLM_MAX_ADDR_COUNT];
181 static int dlm_local_count;
184 static struct workqueue_struct *io_workqueue;
185 static struct workqueue_struct *process_workqueue;
187 static struct hlist_head connection_hash[CONN_HASH_SIZE];
188 static DEFINE_SPINLOCK(connections_lock);
189 DEFINE_STATIC_SRCU(connections_srcu);
191 static const struct dlm_proto_ops *dlm_proto_ops;
193 #define DLM_IO_SUCCESS 0
196 #define DLM_IO_RESCHED 3
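/* These return codes drive the io_workqueue workers: the receive and send
 * loops keep iterating while their handler returns DLM_IO_SUCCESS, and
 * DLM_IO_RESCHED makes the worker requeue itself instead of blocking.
 */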
198 static void process_recv_sockets(struct work_struct *work);
199 static void process_send_sockets(struct work_struct *work);
200 static void process_dlm_messages(struct work_struct *work);
202 static DECLARE_WORK(process_work, process_dlm_messages);
203 static DEFINE_SPINLOCK(processqueue_lock);
204 static bool process_dlm_messages_pending;
205 static LIST_HEAD(processqueue);
207 bool dlm_lowcomms_is_running(void)
209 return !!listen_con.sock;
212 static void lowcomms_queue_swork(struct connection *con)
214 assert_spin_locked(&con->writequeue_lock);
216 if (!test_bit(CF_IO_STOP, &con->flags) &&
217 !test_bit(CF_APP_LIMITED, &con->flags) &&
218 !test_and_set_bit(CF_SEND_PENDING, &con->flags))
219 queue_work(io_workqueue, &con->swork);
222 static void lowcomms_queue_rwork(struct connection *con)
224 #ifdef CONFIG_LOCKDEP
225 WARN_ON_ONCE(!lockdep_sock_is_held(con->sock->sk));
228 if (!test_bit(CF_IO_STOP, &con->flags) &&
229 !test_and_set_bit(CF_RECV_PENDING, &con->flags))
230 queue_work(io_workqueue, &con->rwork);
233 static void writequeue_entry_ctor(void *data)
235 struct writequeue_entry *entry = data;
237 INIT_LIST_HEAD(&entry->msgs);
240 struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void)
242 return kmem_cache_create("dlm_writequeue", sizeof(struct writequeue_entry),
243 0, 0, writequeue_entry_ctor);
246 struct kmem_cache *dlm_lowcomms_msg_cache_create(void)
248 return kmem_cache_create("dlm_msg", sizeof(struct dlm_msg), 0, 0, NULL);
/* must be called with writequeue_lock held */
252 static struct writequeue_entry *con_next_wq(struct connection *con)
254 struct writequeue_entry *e;
256 e = list_first_entry_or_null(&con->writequeue, struct writequeue_entry,
	/* if len is zero there is nothing to send; if there are users still
	 * filling buffers we wait until they are done so we can send more.
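	 *
	 * (e->users stays nonzero from dlm_lowcomms_new_msg() until the
	 * matching dlm_lowcomms_commit_msg(), i.e. while a caller is still
	 * writing into the page.)
	 */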
261 if (!e || e->users || e->len == 0)
267 static struct connection *__find_con(int nodeid, int r)
269 struct connection *con;
271 hlist_for_each_entry_rcu(con, &connection_hash[r], list) {
272 if (con->nodeid == nodeid)
279 static void dlm_con_init(struct connection *con, int nodeid)
281 con->nodeid = nodeid;
282 init_rwsem(&con->sock_lock);
283 INIT_LIST_HEAD(&con->writequeue);
284 spin_lock_init(&con->writequeue_lock);
285 INIT_WORK(&con->swork, process_send_sockets);
286 INIT_WORK(&con->rwork, process_recv_sockets);
287 spin_lock_init(&con->addrs_lock);
288 init_waitqueue_head(&con->shutdown_wait);
/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
295 static struct connection *nodeid2con(int nodeid, gfp_t alloc)
297 struct connection *con, *tmp;
300 r = nodeid_hash(nodeid);
301 con = __find_con(nodeid, r);
305 con = kzalloc(sizeof(*con), alloc);
309 dlm_con_init(con, nodeid);
311 spin_lock(&connections_lock);
	/* Because multiple workqueues/threads call this function it can
	 * race on multiple CPUs. Instead of locking the hot path __find_con()
	 * we just check again, in the rare case of recently added nodes,
	 * under protection of connections_lock. If that is the case we
	 * abort our connection creation and return the existing connection.
	 */
318 tmp = __find_con(nodeid, r);
320 spin_unlock(&connections_lock);
325 hlist_add_head_rcu(&con->list, &connection_hash[r]);
326 spin_unlock(&connections_lock);
331 static int addr_compare(const struct sockaddr_storage *x,
332 const struct sockaddr_storage *y)
334 switch (x->ss_family) {
336 struct sockaddr_in *sinx = (struct sockaddr_in *)x;
337 struct sockaddr_in *siny = (struct sockaddr_in *)y;
338 if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
340 if (sinx->sin_port != siny->sin_port)
345 struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
346 struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
347 if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
349 if (sinx->sin6_port != siny->sin6_port)
359 static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
360 struct sockaddr *sa_out, bool try_new_addr,
363 struct sockaddr_storage sas;
364 struct connection *con;
367 if (!dlm_local_count)
370 idx = srcu_read_lock(&connections_srcu);
371 con = nodeid2con(nodeid, 0);
373 srcu_read_unlock(&connections_srcu, idx);
377 spin_lock(&con->addrs_lock);
378 if (!con->addr_count) {
379 spin_unlock(&con->addrs_lock);
380 srcu_read_unlock(&connections_srcu, idx);
384 memcpy(&sas, &con->addr[con->curr_addr_index],
385 sizeof(struct sockaddr_storage));
388 con->curr_addr_index++;
389 if (con->curr_addr_index == con->addr_count)
390 con->curr_addr_index = 0;
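		/* with try_new_addr (set for SCTP via dlm_sctp_ops.try_new_addr)
		 * each connect attempt rotates round-robin through all
		 * addresses known for this node
		 */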
394 spin_unlock(&con->addrs_lock);
397 memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));
400 srcu_read_unlock(&connections_srcu, idx);
404 if (dlm_local_addr[0].ss_family == AF_INET) {
405 struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
406 struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
407 ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
409 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
410 struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
411 ret6->sin6_addr = in6->sin6_addr;
414 srcu_read_unlock(&connections_srcu, idx);
418 static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid,
421 struct connection *con;
424 idx = srcu_read_lock(&connections_srcu);
425 for (i = 0; i < CONN_HASH_SIZE; i++) {
426 hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
427 WARN_ON_ONCE(!con->addr_count);
429 spin_lock(&con->addrs_lock);
430 for (addr_i = 0; addr_i < con->addr_count; addr_i++) {
431 if (addr_compare(&con->addr[addr_i], addr)) {
432 *nodeid = con->nodeid;
434 spin_unlock(&con->addrs_lock);
435 srcu_read_unlock(&connections_srcu, idx);
439 spin_unlock(&con->addrs_lock);
442 srcu_read_unlock(&connections_srcu, idx);
447 static bool dlm_lowcomms_con_has_addr(const struct connection *con,
448 const struct sockaddr_storage *addr)
452 for (i = 0; i < con->addr_count; i++) {
453 if (addr_compare(&con->addr[i], addr))
460 int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
462 struct connection *con;
465 idx = srcu_read_lock(&connections_srcu);
466 con = nodeid2con(nodeid, GFP_NOFS);
468 srcu_read_unlock(&connections_srcu, idx);
472 spin_lock(&con->addrs_lock);
473 if (!con->addr_count) {
474 memcpy(&con->addr[0], addr, sizeof(*addr));
476 con->mark = dlm_config.ci_mark;
477 spin_unlock(&con->addrs_lock);
478 srcu_read_unlock(&connections_srcu, idx);
482 ret = dlm_lowcomms_con_has_addr(con, addr);
484 spin_unlock(&con->addrs_lock);
485 srcu_read_unlock(&connections_srcu, idx);
489 if (con->addr_count >= DLM_MAX_ADDR_COUNT) {
490 spin_unlock(&con->addrs_lock);
491 srcu_read_unlock(&connections_srcu, idx);
495 memcpy(&con->addr[con->addr_count++], addr, sizeof(*addr));
496 srcu_read_unlock(&connections_srcu, idx);
497 spin_unlock(&con->addrs_lock);
501 /* Data available on socket or listen socket received a connect */
502 static void lowcomms_data_ready(struct sock *sk)
504 struct connection *con = sock2con(sk);
506 trace_sk_data_ready(sk);
508 set_bit(CF_RECV_INTR, &con->flags);
509 lowcomms_queue_rwork(con);
512 static void lowcomms_write_space(struct sock *sk)
514 struct connection *con = sock2con(sk);
516 clear_bit(SOCK_NOSPACE, &con->sock->flags);
518 spin_lock_bh(&con->writequeue_lock);
519 if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
520 con->sock->sk->sk_write_pending--;
521 clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
524 lowcomms_queue_swork(con);
525 spin_unlock_bh(&con->writequeue_lock);
528 static void lowcomms_state_change(struct sock *sk)
	/* The SCTP layer does not call sk_data_ready when the connection
	 * is done, so we catch the signal here instead.
	 */
533 if (sk->sk_shutdown == RCV_SHUTDOWN)
534 lowcomms_data_ready(sk);
537 static void lowcomms_listen_data_ready(struct sock *sk)
539 trace_sk_data_ready(sk);
541 queue_work(io_workqueue, &listen_con.rwork);
544 int dlm_lowcomms_connect_node(int nodeid)
546 struct connection *con;
549 idx = srcu_read_lock(&connections_srcu);
550 con = nodeid2con(nodeid, 0);
551 if (WARN_ON_ONCE(!con)) {
552 srcu_read_unlock(&connections_srcu, idx);
556 down_read(&con->sock_lock);
558 spin_lock_bh(&con->writequeue_lock);
559 lowcomms_queue_swork(con);
560 spin_unlock_bh(&con->writequeue_lock);
562 up_read(&con->sock_lock);
563 srcu_read_unlock(&connections_srcu, idx);
569 int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark)
571 struct connection *con;
574 idx = srcu_read_lock(&connections_srcu);
575 con = nodeid2con(nodeid, 0);
577 srcu_read_unlock(&connections_srcu, idx);
581 spin_lock(&con->addrs_lock);
583 spin_unlock(&con->addrs_lock);
584 srcu_read_unlock(&connections_srcu, idx);
588 static void lowcomms_error_report(struct sock *sk)
590 struct connection *con = sock2con(sk);
591 struct inet_sock *inet;
594 switch (sk->sk_family) {
596 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
597 "sending to node %d at %pI4, dport %d, "
598 "sk_err=%d/%d\n", dlm_our_nodeid(),
599 con->nodeid, &inet->inet_daddr,
600 ntohs(inet->inet_dport), sk->sk_err,
601 READ_ONCE(sk->sk_err_soft));
603 #if IS_ENABLED(CONFIG_IPV6)
605 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
606 "sending to node %d at %pI6c, "
607 "dport %d, sk_err=%d/%d\n", dlm_our_nodeid(),
608 con->nodeid, &sk->sk_v6_daddr,
609 ntohs(inet->inet_dport), sk->sk_err,
610 READ_ONCE(sk->sk_err_soft));
614 printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
615 "invalid socket family %d set, "
616 "sk_err=%d/%d\n", dlm_our_nodeid(),
617 sk->sk_family, sk->sk_err,
618 READ_ONCE(sk->sk_err_soft));
622 dlm_midcomms_unack_msg_resend(con->nodeid);
624 listen_sock.sk_error_report(sk);
627 static void restore_callbacks(struct sock *sk)
629 #ifdef CONFIG_LOCKDEP
630 WARN_ON_ONCE(!lockdep_sock_is_held(sk));
633 sk->sk_user_data = NULL;
634 sk->sk_data_ready = listen_sock.sk_data_ready;
635 sk->sk_state_change = listen_sock.sk_state_change;
636 sk->sk_write_space = listen_sock.sk_write_space;
637 sk->sk_error_report = listen_sock.sk_error_report;
640 /* Make a socket active */
641 static void add_sock(struct socket *sock, struct connection *con)
643 struct sock *sk = sock->sk;
648 sk->sk_user_data = con;
649 sk->sk_data_ready = lowcomms_data_ready;
650 sk->sk_write_space = lowcomms_write_space;
651 if (dlm_config.ci_protocol == DLM_PROTO_SCTP)
652 sk->sk_state_change = lowcomms_state_change;
653 sk->sk_allocation = GFP_NOFS;
654 sk->sk_use_task_frag = false;
655 sk->sk_error_report = lowcomms_error_report;
/* Add the port number to an IPv4 or IPv6 sockaddr and return the
 * address length.
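 *
 * For an AF_INET local address, for example, *addr_len becomes
 * sizeof(struct sockaddr_in) and the unused tail of the
 * sockaddr_storage is zeroed.
 */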
661 static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
664 saddr->ss_family = dlm_local_addr[0].ss_family;
665 if (saddr->ss_family == AF_INET) {
666 struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
667 in4_addr->sin_port = cpu_to_be16(port);
668 *addr_len = sizeof(struct sockaddr_in);
669 memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
671 struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
672 in6_addr->sin6_port = cpu_to_be16(port);
673 *addr_len = sizeof(struct sockaddr_in6);
675 memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
678 static void dlm_page_release(struct kref *kref)
680 struct writequeue_entry *e = container_of(kref, struct writequeue_entry,
683 __free_page(e->page);
684 dlm_free_writequeue(e);
687 static void dlm_msg_release(struct kref *kref)
689 struct dlm_msg *msg = container_of(kref, struct dlm_msg, ref);
691 kref_put(&msg->entry->ref, dlm_page_release);
695 static void free_entry(struct writequeue_entry *e)
697 struct dlm_msg *msg, *tmp;
699 list_for_each_entry_safe(msg, tmp, &e->msgs, list) {
701 msg->orig_msg->retransmit = false;
702 kref_put(&msg->orig_msg->ref, dlm_msg_release);
705 list_del(&msg->list);
706 kref_put(&msg->ref, dlm_msg_release);
710 kref_put(&e->ref, dlm_page_release);
713 static void dlm_close_sock(struct socket **sock)
715 lock_sock((*sock)->sk);
716 restore_callbacks((*sock)->sk);
717 release_sock((*sock)->sk);
723 static void allow_connection_io(struct connection *con)
726 clear_bit(CF_IO_STOP, &con->othercon->flags);
727 clear_bit(CF_IO_STOP, &con->flags);
730 static void stop_connection_io(struct connection *con)
733 stop_connection_io(con->othercon);
735 spin_lock_bh(&con->writequeue_lock);
736 set_bit(CF_IO_STOP, &con->flags);
737 spin_unlock_bh(&con->writequeue_lock);
739 down_write(&con->sock_lock);
741 lock_sock(con->sock->sk);
742 restore_callbacks(con->sock->sk);
743 release_sock(con->sock->sk);
745 up_write(&con->sock_lock);
747 cancel_work_sync(&con->swork);
748 cancel_work_sync(&con->rwork);
751 /* Close a remote connection and tidy up */
752 static void close_connection(struct connection *con, bool and_other)
754 struct writequeue_entry *e;
756 if (con->othercon && and_other)
757 close_connection(con->othercon, false);
759 down_write(&con->sock_lock);
761 up_write(&con->sock_lock);
765 dlm_close_sock(&con->sock);
	/* if we sent a writequeue entry only half way, we drop the
	 * whole entry, because on reconnection we must not start in the
	 * middle of a msg, which would confuse the other end.
	 *
	 * we can always drop messages because of retransmits, but what we
	 * cannot allow is to transmit half messages which may be processed
	 * by the other end.
	 *
	 * our policy is to start from a clean state on disconnect; we don't
	 * know what was sent/received on the transport layer in this case.
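	 *
	 * e.g. if only the first half of a message made it onto the wire
	 * before the connection died, the partially sent entry is freed here
	 * and the retransmit handling in midcomms
	 * (dlm_midcomms_unack_msg_resend()) recovers the whole message on the
	 * new connection.
	 */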
778 spin_lock_bh(&con->writequeue_lock);
779 if (!list_empty(&con->writequeue)) {
780 e = list_first_entry(&con->writequeue, struct writequeue_entry,
785 spin_unlock_bh(&con->writequeue_lock);
787 con->rx_leftover = 0;
789 clear_bit(CF_APP_LIMITED, &con->flags);
790 clear_bit(CF_RECV_PENDING, &con->flags);
791 clear_bit(CF_SEND_PENDING, &con->flags);
792 up_write(&con->sock_lock);
795 static void shutdown_connection(struct connection *con, bool and_other)
799 if (con->othercon && and_other)
800 shutdown_connection(con->othercon, false);
802 flush_workqueue(io_workqueue);
803 down_read(&con->sock_lock);
804 /* nothing to shutdown */
806 up_read(&con->sock_lock);
810 ret = kernel_sock_shutdown(con->sock, SHUT_WR);
811 up_read(&con->sock_lock);
813 log_print("Connection %p failed to shutdown: %d will force close",
817 ret = wait_event_timeout(con->shutdown_wait, !con->sock,
818 DLM_SHUTDOWN_WAIT_TIMEOUT);
820 log_print("Connection %p shutdown timed out, will force close",
829 close_connection(con, false);
832 static struct processqueue_entry *new_processqueue_entry(int nodeid,
835 struct processqueue_entry *pentry;
837 pentry = kmalloc(sizeof(*pentry), GFP_NOFS);
841 pentry->buf = kmalloc(buflen, GFP_NOFS);
847 pentry->nodeid = nodeid;
851 static void free_processqueue_entry(struct processqueue_entry *pentry)
857 struct dlm_processed_nodes {
860 struct list_head list;
863 static void process_dlm_messages(struct work_struct *work)
865 struct processqueue_entry *pentry;
866 LIST_HEAD(processed_nodes);
868 spin_lock(&processqueue_lock);
869 pentry = list_first_entry_or_null(&processqueue,
870 struct processqueue_entry, list);
871 if (WARN_ON_ONCE(!pentry)) {
872 process_dlm_messages_pending = false;
873 spin_unlock(&processqueue_lock);
877 list_del(&pentry->list);
878 spin_unlock(&processqueue_lock);
881 dlm_process_incoming_buffer(pentry->nodeid, pentry->buf,
883 free_processqueue_entry(pentry);
885 spin_lock(&processqueue_lock);
886 pentry = list_first_entry_or_null(&processqueue,
887 struct processqueue_entry, list);
889 process_dlm_messages_pending = false;
890 spin_unlock(&processqueue_lock);
894 list_del(&pentry->list);
895 spin_unlock(&processqueue_lock);
899 /* Data received from remote end */
900 static int receive_from_sock(struct connection *con, int buflen)
902 struct processqueue_entry *pentry;
903 int ret, buflen_real;
907 pentry = new_processqueue_entry(con->nodeid, buflen);
909 return DLM_IO_RESCHED;
911 memcpy(pentry->buf, con->rx_leftover_buf, con->rx_leftover);
	/* calculate new buffer parameters taking into account the last
	 * receive and possible leftover bytes
	 */
916 iov.iov_base = pentry->buf + con->rx_leftover;
917 iov.iov_len = buflen - con->rx_leftover;
919 memset(&msg, 0, sizeof(msg));
920 msg.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
921 clear_bit(CF_RECV_INTR, &con->flags);
923 ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
925 trace_dlm_recv(con->nodeid, ret);
926 if (ret == -EAGAIN) {
927 lock_sock(con->sock->sk);
928 if (test_and_clear_bit(CF_RECV_INTR, &con->flags)) {
929 release_sock(con->sock->sk);
933 clear_bit(CF_RECV_PENDING, &con->flags);
934 release_sock(con->sock->sk);
935 free_processqueue_entry(pentry);
937 } else if (ret == 0) {
938 /* close will clear CF_RECV_PENDING */
939 free_processqueue_entry(pentry);
941 } else if (ret < 0) {
942 free_processqueue_entry(pentry);
	/* new buflen according to the bytes read plus the leftover from the last receive */
947 buflen_real = ret + con->rx_leftover;
948 ret = dlm_validate_incoming_buffer(con->nodeid, pentry->buf,
951 free_processqueue_entry(pentry);
955 pentry->buflen = ret;
	/* calculate the leftover bytes from processing and move them to the
	 * beginning of the receive buffer, so that on the next receive we
	 * have the full message at the start address of the receive buffer.
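	 *
	 * e.g. if 120 bytes arrive but only 100 of them form complete
	 * messages, rx_leftover becomes 20 and those trailing bytes are
	 * copied back to the front of rx_leftover_buf to seed the next
	 * receive.
	 */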
961 con->rx_leftover = buflen_real - ret;
962 memmove(con->rx_leftover_buf, pentry->buf + ret,
965 spin_lock(&processqueue_lock);
966 list_add_tail(&pentry->list, &processqueue);
967 if (!process_dlm_messages_pending) {
968 process_dlm_messages_pending = true;
969 queue_work(process_workqueue, &process_work);
971 spin_unlock(&processqueue_lock);
973 return DLM_IO_SUCCESS;
976 /* Listening socket is busy, accept a connection */
977 static int accept_from_sock(void)
979 struct sockaddr_storage peeraddr;
980 int len, idx, result, nodeid;
981 struct connection *newcon;
982 struct socket *newsock;
985 result = kernel_accept(listen_con.sock, &newsock, O_NONBLOCK);
986 if (result == -EAGAIN)
991 /* Get the connected socket's peer */
992 memset(&peeraddr, 0, sizeof(peeraddr));
993 len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
995 result = -ECONNABORTED;
999 /* Get the new node's NODEID */
1000 make_sockaddr(&peeraddr, 0, &len);
1001 if (addr_to_nodeid(&peeraddr, &nodeid, &mark)) {
1002 switch (peeraddr.ss_family) {
1004 struct sockaddr_in *sin = (struct sockaddr_in *)&peeraddr;
1006 log_print("connect from non cluster IPv4 node %pI4",
1010 #if IS_ENABLED(CONFIG_IPV6)
1012 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&peeraddr;
1014 log_print("connect from non cluster IPv6 node %pI6c",
1020 log_print("invalid family from non cluster node");
1024 sock_release(newsock);
1028 log_print("got connection from %d", nodeid);
1030 /* Check to see if we already have a connection to this node. This
1031 * could happen if the two nodes initiate a connection at roughly
1032 * the same time and the connections cross on the wire.
1033 * In this case we store the incoming one in "othercon"
1035 idx = srcu_read_lock(&connections_srcu);
1036 newcon = nodeid2con(nodeid, 0);
1037 if (WARN_ON_ONCE(!newcon)) {
1038 srcu_read_unlock(&connections_srcu, idx);
1043 sock_set_mark(newsock->sk, mark);
1045 down_write(&newcon->sock_lock);
1047 struct connection *othercon = newcon->othercon;
1050 othercon = kzalloc(sizeof(*othercon), GFP_NOFS);
1052 log_print("failed to allocate incoming socket");
1053 up_write(&newcon->sock_lock);
1054 srcu_read_unlock(&connections_srcu, idx);
1059 dlm_con_init(othercon, nodeid);
1060 lockdep_set_subclass(&othercon->sock_lock, 1);
1061 newcon->othercon = othercon;
1062 set_bit(CF_IS_OTHERCON, &othercon->flags);
1064 /* close other sock con if we have something new */
1065 close_connection(othercon, false);
1068 down_write(&othercon->sock_lock);
1069 add_sock(newsock, othercon);
		/* check if we received something while adding */
1072 lock_sock(othercon->sock->sk);
1073 lowcomms_queue_rwork(othercon);
1074 release_sock(othercon->sock->sk);
1075 up_write(&othercon->sock_lock);
1078 /* accept copies the sk after we've saved the callbacks, so we
1079 don't want to save them a second time or comm errors will
1080 result in calling sk_error_report recursively. */
1081 add_sock(newsock, newcon);
		/* check if we received something while adding */
1084 lock_sock(newcon->sock->sk);
1085 lowcomms_queue_rwork(newcon);
1086 release_sock(newcon->sock->sk);
1088 up_write(&newcon->sock_lock);
1089 srcu_read_unlock(&connections_srcu, idx);
1091 return DLM_IO_SUCCESS;
1095 sock_release(newsock);
1101 * writequeue_entry_complete - try to delete and free write queue entry
1102 * @e: write queue entry to try to delete
1103 * @completed: bytes completed
1105 * writequeue_lock must be held.
1107 static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
1109 e->offset += completed;
1110 e->len -= completed;
	/* signal that the page was only halfway transmitted */
1114 if (e->len == 0 && e->users == 0)
1119 * sctp_bind_addrs - bind a SCTP socket to all our addresses
1121 static int sctp_bind_addrs(struct socket *sock, uint16_t port)
1123 struct sockaddr_storage localaddr;
1124 struct sockaddr *addr = (struct sockaddr *)&localaddr;
1125 int i, addr_len, result = 0;
1127 for (i = 0; i < dlm_local_count; i++) {
1128 memcpy(&localaddr, &dlm_local_addr[i], sizeof(localaddr));
1129 make_sockaddr(&localaddr, port, &addr_len);
1132 result = kernel_bind(sock, addr, addr_len);
1134 result = sock_bind_add(sock->sk, addr, addr_len);
1137 log_print("Can't bind to %d addr number %d, %d.\n",
1138 port, i + 1, result);
1145 /* Get local addresses */
1146 static void init_local(void)
1148 struct sockaddr_storage sas;
1151 dlm_local_count = 0;
1152 for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
1153 if (dlm_our_addr(&sas, i))
1156 memcpy(&dlm_local_addr[dlm_local_count++], &sas, sizeof(sas));
1160 static struct writequeue_entry *new_writequeue_entry(struct connection *con)
1162 struct writequeue_entry *entry;
1164 entry = dlm_allocate_writequeue();
1168 entry->page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
1170 dlm_free_writequeue(entry);
1177 entry->dirty = false;
1180 kref_init(&entry->ref);
1184 static struct writequeue_entry *new_wq_entry(struct connection *con, int len,
1185 char **ppc, void (*cb)(void *data),
1188 struct writequeue_entry *e;
1190 spin_lock_bh(&con->writequeue_lock);
1191 if (!list_empty(&con->writequeue)) {
1192 e = list_last_entry(&con->writequeue, struct writequeue_entry, list);
1193 if (DLM_WQ_REMAIN_BYTES(e) >= len) {
1196 *ppc = page_address(e->page) + e->end;
1206 e = new_writequeue_entry(con);
1211 *ppc = page_address(e->page);
1216 list_add_tail(&e->list, &con->writequeue);
1219 spin_unlock_bh(&con->writequeue_lock);
1223 static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
1224 gfp_t allocation, char **ppc,
1225 void (*cb)(void *data),
1228 struct writequeue_entry *e;
1229 struct dlm_msg *msg;
1231 msg = dlm_allocate_msg(allocation);
1235 kref_init(&msg->ref);
1237 e = new_wq_entry(con, len, ppc, cb, data);
1243 msg->retransmit = false;
1244 msg->orig_msg = NULL;
/* avoid a false positive for nodes_srcu; the unlock happens in
 * dlm_lowcomms_commit_msg(), which must be called on success
 */
1256 struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
1257 char **ppc, void (*cb)(void *data),
1260 struct connection *con;
1261 struct dlm_msg *msg;
1264 if (len > DLM_MAX_SOCKET_BUFSIZE ||
1265 len < sizeof(struct dlm_header)) {
1266 BUILD_BUG_ON(PAGE_SIZE < DLM_MAX_SOCKET_BUFSIZE);
1267 log_print("failed to allocate a buffer of size %d", len);
1272 idx = srcu_read_lock(&connections_srcu);
1273 con = nodeid2con(nodeid, 0);
1274 if (WARN_ON_ONCE(!con)) {
1275 srcu_read_unlock(&connections_srcu, idx);
1279 msg = dlm_lowcomms_new_msg_con(con, len, allocation, ppc, cb, data);
1281 srcu_read_unlock(&connections_srcu, idx);
1285 /* for dlm_lowcomms_commit_msg() */
1286 kref_get(&msg->ref);
	/* we assume that on success, commit must be called */
1293 static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
1295 struct writequeue_entry *e = msg->entry;
1296 struct connection *con = e->con;
1299 spin_lock_bh(&con->writequeue_lock);
1300 kref_get(&msg->ref);
1301 list_add(&msg->list, &e->msgs);
1307 e->len = DLM_WQ_LENGTH_BYTES(e);
1309 lowcomms_queue_swork(con);
1312 spin_unlock_bh(&con->writequeue_lock);
/* avoid a false positive for nodes_srcu; the lock was taken in
 * dlm_lowcomms_new_msg()
 */
1320 void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
1322 _dlm_lowcomms_commit_msg(msg);
1323 srcu_read_unlock(&connections_srcu, msg->idx);
	/* drop the extra reference taken in dlm_lowcomms_new_msg() */
1325 kref_put(&msg->ref, dlm_msg_release);
1329 void dlm_lowcomms_put_msg(struct dlm_msg *msg)
1331 kref_put(&msg->ref, dlm_msg_release);
/* does not hold connections_srcu; used only from lowcomms_error_report */
1335 int dlm_lowcomms_resend_msg(struct dlm_msg *msg)
1337 struct dlm_msg *msg_resend;
1340 if (msg->retransmit)
1343 msg_resend = dlm_lowcomms_new_msg_con(msg->entry->con, msg->len,
1344 GFP_ATOMIC, &ppc, NULL, NULL);
1348 msg->retransmit = true;
1349 kref_get(&msg->ref);
1350 msg_resend->orig_msg = msg;
1352 memcpy(ppc, msg->ppc, msg->len);
1353 _dlm_lowcomms_commit_msg(msg_resend);
1354 dlm_lowcomms_put_msg(msg_resend);
1359 /* Send a message */
1360 static int send_to_sock(struct connection *con)
1362 struct writequeue_entry *e;
1363 struct bio_vec bvec;
1364 struct msghdr msg = {
1365 .msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL,
1367 int len, offset, ret;
1369 spin_lock_bh(&con->writequeue_lock);
1370 e = con_next_wq(con);
1372 clear_bit(CF_SEND_PENDING, &con->flags);
1373 spin_unlock_bh(&con->writequeue_lock);
1379 WARN_ON_ONCE(len == 0 && e->users == 0);
1380 spin_unlock_bh(&con->writequeue_lock);
1382 bvec_set_page(&bvec, e->page, len, offset);
1383 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bvec, 1, len);
1384 ret = sock_sendmsg(con->sock, &msg);
1385 trace_dlm_send(con->nodeid, ret);
1386 if (ret == -EAGAIN || ret == 0) {
1387 lock_sock(con->sock->sk);
1388 spin_lock_bh(&con->writequeue_lock);
1389 if (test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
1390 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1391 /* Notify TCP that we're limited by the
1392 * application window size.
1394 set_bit(SOCK_NOSPACE, &con->sock->sk->sk_socket->flags);
1395 con->sock->sk->sk_write_pending++;
1397 clear_bit(CF_SEND_PENDING, &con->flags);
1398 spin_unlock_bh(&con->writequeue_lock);
1399 release_sock(con->sock->sk);
1401 /* wait for write_space() event */
1404 spin_unlock_bh(&con->writequeue_lock);
1405 release_sock(con->sock->sk);
1407 return DLM_IO_RESCHED;
1408 } else if (ret < 0) {
1412 spin_lock_bh(&con->writequeue_lock);
1413 writequeue_entry_complete(e, ret);
1414 spin_unlock_bh(&con->writequeue_lock);
1416 return DLM_IO_SUCCESS;
1419 static void clean_one_writequeue(struct connection *con)
1421 struct writequeue_entry *e, *safe;
1423 spin_lock_bh(&con->writequeue_lock);
1424 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
1427 spin_unlock_bh(&con->writequeue_lock);
1430 static void connection_release(struct rcu_head *rcu)
1432 struct connection *con = container_of(rcu, struct connection, rcu);
1434 WARN_ON_ONCE(!list_empty(&con->writequeue));
1435 WARN_ON_ONCE(con->sock);
/* Called from recovery when it knows that a node has
 * left the cluster
 */
1441 int dlm_lowcomms_close(int nodeid)
1443 struct connection *con;
1446 log_print("closing connection to node %d", nodeid);
1448 idx = srcu_read_lock(&connections_srcu);
1449 con = nodeid2con(nodeid, 0);
1450 if (WARN_ON_ONCE(!con)) {
1451 srcu_read_unlock(&connections_srcu, idx);
1455 stop_connection_io(con);
1456 log_print("io handling for node: %d stopped", nodeid);
1457 close_connection(con, true);
1459 spin_lock(&connections_lock);
1460 hlist_del_rcu(&con->list);
1461 spin_unlock(&connections_lock);
1463 clean_one_writequeue(con);
1464 call_srcu(&connections_srcu, &con->rcu, connection_release);
1465 if (con->othercon) {
1466 clean_one_writequeue(con->othercon);
1467 call_srcu(&connections_srcu, &con->othercon->rcu, connection_release);
1469 srcu_read_unlock(&connections_srcu, idx);
	/* for debugging we print when we are done, to compare with other
	 * messages in between. This function needs to be correctly synchronized
1475 log_print("closing connection to node %d done", nodeid);
1480 /* Receive worker function */
1481 static void process_recv_sockets(struct work_struct *work)
1483 struct connection *con = container_of(work, struct connection, rwork);
1486 down_read(&con->sock_lock);
1488 up_read(&con->sock_lock);
1492 buflen = READ_ONCE(dlm_config.ci_buffer_size);
1494 ret = receive_from_sock(con, buflen);
1495 } while (ret == DLM_IO_SUCCESS);
1496 up_read(&con->sock_lock);
1500 /* CF_RECV_PENDING cleared */
1503 close_connection(con, false);
1504 wake_up(&con->shutdown_wait);
1505 /* CF_RECV_PENDING cleared */
1507 case DLM_IO_RESCHED:
1509 queue_work(io_workqueue, &con->rwork);
1510 /* CF_RECV_PENDING not cleared */
1514 if (test_bit(CF_IS_OTHERCON, &con->flags)) {
1515 close_connection(con, false);
1517 spin_lock_bh(&con->writequeue_lock);
1518 lowcomms_queue_swork(con);
1519 spin_unlock_bh(&con->writequeue_lock);
		/* CF_RECV_PENDING cleared for othercon;
		 * we trigger the send queue if not already done,
		 * and process_send_sockets will handle it
1534 static void process_listen_recv_socket(struct work_struct *work)
1538 if (WARN_ON_ONCE(!listen_con.sock))
1542 ret = accept_from_sock();
1543 } while (ret == DLM_IO_SUCCESS);
1546 log_print("critical error accepting connection: %d", ret);
1549 static int dlm_connect(struct connection *con)
1551 struct sockaddr_storage addr;
1552 int result, addr_len;
1553 struct socket *sock;
1556 memset(&addr, 0, sizeof(addr));
1557 result = nodeid_to_addr(con->nodeid, &addr, NULL,
1558 dlm_proto_ops->try_new_addr, &mark);
1560 log_print("no address for nodeid %d", con->nodeid);
1564 /* Create a socket to communicate with */
1565 result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
1566 SOCK_STREAM, dlm_proto_ops->proto, &sock);
1570 sock_set_mark(sock->sk, mark);
1571 dlm_proto_ops->sockopts(sock);
1573 result = dlm_proto_ops->bind(sock);
1579 add_sock(sock, con);
1581 log_print_ratelimited("connecting to %d", con->nodeid);
1582 make_sockaddr(&addr, dlm_config.ci_tcp_port, &addr_len);
1583 result = dlm_proto_ops->connect(con, sock, (struct sockaddr *)&addr,
1593 dlm_close_sock(&con->sock);
1601 /* Send worker function */
1602 static void process_send_sockets(struct work_struct *work)
1604 struct connection *con = container_of(work, struct connection, swork);
1607 WARN_ON_ONCE(test_bit(CF_IS_OTHERCON, &con->flags));
1609 down_read(&con->sock_lock);
1611 up_read(&con->sock_lock);
1612 down_write(&con->sock_lock);
1614 ret = dlm_connect(con);
			/* avoid spamming resched on connection attempts;
			 * we might be able to switch to a state_change
			 * event based mechanism once established
1626 /* CF_SEND_PENDING not cleared */
1627 up_write(&con->sock_lock);
1628 log_print("connect to node %d try %d error %d",
1629 con->nodeid, con->retries++, ret);
			/* For now we try forever to reconnect. In
			 * the future we should send an event to the cluster
			 * manager to fence itself after a certain amount
			 * of retries.
			 */
1636 queue_work(io_workqueue, &con->swork);
1640 downgrade_write(&con->sock_lock);
1644 ret = send_to_sock(con);
1645 } while (ret == DLM_IO_SUCCESS);
1646 up_read(&con->sock_lock);
1650 /* CF_SEND_PENDING cleared */
1652 case DLM_IO_RESCHED:
1653 /* CF_SEND_PENDING not cleared */
1655 queue_work(io_workqueue, &con->swork);
1659 close_connection(con, false);
1661 /* CF_SEND_PENDING cleared */
1662 spin_lock_bh(&con->writequeue_lock);
1663 lowcomms_queue_swork(con);
1664 spin_unlock_bh(&con->writequeue_lock);
1673 static void work_stop(void)
1676 destroy_workqueue(io_workqueue);
1677 io_workqueue = NULL;
1680 if (process_workqueue) {
1681 destroy_workqueue(process_workqueue);
1682 process_workqueue = NULL;
1686 static int work_start(void)
1688 io_workqueue = alloc_workqueue("dlm_io", WQ_HIGHPRI | WQ_MEM_RECLAIM |
1690 if (!io_workqueue) {
1691 log_print("can't start dlm_io");
	/* ordered dlm message processing queue;
	 * should be converted to a tasklet
1698 process_workqueue = alloc_ordered_workqueue("dlm_process",
1699 WQ_HIGHPRI | WQ_MEM_RECLAIM);
1700 if (!process_workqueue) {
1701 log_print("can't start dlm_process");
1702 destroy_workqueue(io_workqueue);
1703 io_workqueue = NULL;
1710 void dlm_lowcomms_shutdown(void)
1712 struct connection *con;
1715 /* stop lowcomms_listen_data_ready calls */
1716 lock_sock(listen_con.sock->sk);
1717 listen_con.sock->sk->sk_data_ready = listen_sock.sk_data_ready;
1718 release_sock(listen_con.sock->sk);
1720 cancel_work_sync(&listen_con.rwork);
1721 dlm_close_sock(&listen_con.sock);
1723 idx = srcu_read_lock(&connections_srcu);
1724 for (i = 0; i < CONN_HASH_SIZE; i++) {
1725 hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
1726 shutdown_connection(con, true);
1727 stop_connection_io(con);
1728 flush_workqueue(process_workqueue);
1729 close_connection(con, true);
1731 clean_one_writequeue(con);
1733 clean_one_writequeue(con->othercon);
1734 allow_connection_io(con);
1737 srcu_read_unlock(&connections_srcu, idx);
1740 void dlm_lowcomms_stop(void)
1743 dlm_proto_ops = NULL;
1746 static int dlm_listen_for_all(void)
1748 struct socket *sock;
1751 log_print("Using %s for communications",
1752 dlm_proto_ops->name);
1754 result = dlm_proto_ops->listen_validate();
1758 result = sock_create_kern(&init_net, dlm_local_addr[0].ss_family,
1759 SOCK_STREAM, dlm_proto_ops->proto, &sock);
1761 log_print("Can't create comms socket: %d", result);
1765 sock_set_mark(sock->sk, dlm_config.ci_mark);
1766 dlm_proto_ops->listen_sockopts(sock);
1768 result = dlm_proto_ops->listen_bind(sock);
1772 lock_sock(sock->sk);
1773 listen_sock.sk_data_ready = sock->sk->sk_data_ready;
1774 listen_sock.sk_write_space = sock->sk->sk_write_space;
1775 listen_sock.sk_error_report = sock->sk->sk_error_report;
1776 listen_sock.sk_state_change = sock->sk->sk_state_change;
1778 listen_con.sock = sock;
1780 sock->sk->sk_allocation = GFP_NOFS;
1781 sock->sk->sk_use_task_frag = false;
1782 sock->sk->sk_data_ready = lowcomms_listen_data_ready;
1783 release_sock(sock->sk);
1785 result = sock->ops->listen(sock, 128);
1787 dlm_close_sock(&listen_con.sock);
1798 static int dlm_tcp_bind(struct socket *sock)
1800 struct sockaddr_storage src_addr;
1801 int result, addr_len;
	/* Bind to our cluster-known address when connecting, to avoid
	 * routing problems.
	 */
1806 memcpy(&src_addr, &dlm_local_addr[0], sizeof(src_addr));
1807 make_sockaddr(&src_addr, 0, &addr_len);
1809 result = sock->ops->bind(sock, (struct sockaddr *)&src_addr,
1812 /* This *may* not indicate a critical error */
1813 log_print("could not bind for connect: %d", result);
1819 static int dlm_tcp_connect(struct connection *con, struct socket *sock,
1820 struct sockaddr *addr, int addr_len)
1822 return sock->ops->connect(sock, addr, addr_len, O_NONBLOCK);
1825 static int dlm_tcp_listen_validate(void)
1827 /* We don't support multi-homed hosts */
1828 if (dlm_local_count > 1) {
1829 log_print("TCP protocol can't handle multi-homed hosts, try SCTP");
1836 static void dlm_tcp_sockopts(struct socket *sock)
1838 /* Turn off Nagle's algorithm */
1839 tcp_sock_set_nodelay(sock->sk);
1842 static void dlm_tcp_listen_sockopts(struct socket *sock)
1844 dlm_tcp_sockopts(sock);
1845 sock_set_reuseaddr(sock->sk);
1848 static int dlm_tcp_listen_bind(struct socket *sock)
1852 /* Bind to our port */
1853 make_sockaddr(&dlm_local_addr[0], dlm_config.ci_tcp_port, &addr_len);
1854 return sock->ops->bind(sock, (struct sockaddr *)&dlm_local_addr[0],
1858 static const struct dlm_proto_ops dlm_tcp_ops = {
1860 .proto = IPPROTO_TCP,
1861 .connect = dlm_tcp_connect,
1862 .sockopts = dlm_tcp_sockopts,
1863 .bind = dlm_tcp_bind,
1864 .listen_validate = dlm_tcp_listen_validate,
1865 .listen_sockopts = dlm_tcp_listen_sockopts,
1866 .listen_bind = dlm_tcp_listen_bind,
1869 static int dlm_sctp_bind(struct socket *sock)
1871 return sctp_bind_addrs(sock, 0);
1874 static int dlm_sctp_connect(struct connection *con, struct socket *sock,
1875 struct sockaddr *addr, int addr_len)
	/*
	 * Make sock->ops->connect() return within the specified time,
	 * since the O_NONBLOCK argument to connect() does not work here.
	 * Afterwards, restore the default value of this attribute.
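	 *
	 * sock_set_sndtimeo(sk, 5) bounds the connect attempt to roughly
	 * five seconds; calling it again with 0 restores the kernel default
	 * of waiting indefinitely.
	 */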
1884 sock_set_sndtimeo(sock->sk, 5);
1885 ret = sock->ops->connect(sock, addr, addr_len, 0);
1886 sock_set_sndtimeo(sock->sk, 0);
1890 static int dlm_sctp_listen_validate(void)
1892 if (!IS_ENABLED(CONFIG_IP_SCTP)) {
1893 log_print("SCTP is not enabled by this kernel");
1897 request_module("sctp");
1901 static int dlm_sctp_bind_listen(struct socket *sock)
1903 return sctp_bind_addrs(sock, dlm_config.ci_tcp_port);
1906 static void dlm_sctp_sockopts(struct socket *sock)
1908 /* Turn off Nagle's algorithm */
1909 sctp_sock_set_nodelay(sock->sk);
1910 sock_set_rcvbuf(sock->sk, NEEDED_RMEM);
1913 static const struct dlm_proto_ops dlm_sctp_ops = {
1915 .proto = IPPROTO_SCTP,
1916 .try_new_addr = true,
1917 .connect = dlm_sctp_connect,
1918 .sockopts = dlm_sctp_sockopts,
1919 .bind = dlm_sctp_bind,
1920 .listen_validate = dlm_sctp_listen_validate,
1921 .listen_sockopts = dlm_sctp_sockopts,
1922 .listen_bind = dlm_sctp_bind_listen,
1925 int dlm_lowcomms_start(void)
1930 if (!dlm_local_count) {
1932 log_print("no local IP address has been set");
1936 error = work_start();
1940 /* Start listening */
1941 switch (dlm_config.ci_protocol) {
1943 dlm_proto_ops = &dlm_tcp_ops;
1945 case DLM_PROTO_SCTP:
1946 dlm_proto_ops = &dlm_sctp_ops;
1949 log_print("Invalid protocol identifier %d set",
1950 dlm_config.ci_protocol);
1952 goto fail_proto_ops;
1955 error = dlm_listen_for_all();
1962 dlm_proto_ops = NULL;
1969 void dlm_lowcomms_init(void)
1973 for (i = 0; i < CONN_HASH_SIZE; i++)
1974 INIT_HLIST_HEAD(&connection_hash[i]);
1976 INIT_WORK(&listen_con.rwork, process_listen_recv_socket);
1979 void dlm_lowcomms_exit(void)
1981 struct connection *con;
1984 idx = srcu_read_lock(&connections_srcu);
1985 for (i = 0; i < CONN_HASH_SIZE; i++) {
1986 hlist_for_each_entry_rcu(con, &connection_hash[i], list) {
1987 spin_lock(&connections_lock);
1988 hlist_del_rcu(&con->list);
1989 spin_unlock(&connections_lock);
1992 call_srcu(&connections_srcu, &con->othercon->rcu,
1993 connection_release);
1994 call_srcu(&connections_srcu, &con->rcu, connection_release);
1997 srcu_read_unlock(&connections_srcu, idx);