/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "rds.h"
#include "tcp.h"
/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
static unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);

static struct kmem_cache *rds_tcp_conn_slab;

#define RDS_TCP_DEFAULT_BUFSIZE (128 * 1024)
/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
        mm_segment_t oldfs = get_fs();
        int val = 1;

        set_fs(KERNEL_DS);
        sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
                              sizeof(val));
        set_fs(oldfs);
}
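/* Disable Nagle and pin both socket buffers at 128KB; the SOCK_SNDBUF_LOCK
 * and SOCK_RCVBUF_LOCK bits keep TCP's autotuning from resizing them later.
 */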
void rds_tcp_tune(struct socket *sock)
{
        struct sock *sk = sock->sk;

        rds_tcp_nonagle(sock);

        /*
         * We're trying to saturate gigabit with the default,
         * see svc_sock_setbufsize().
         */
        lock_sock(sk);
        sk->sk_sndbuf = RDS_TCP_DEFAULT_BUFSIZE;
        sk->sk_rcvbuf = RDS_TCP_DEFAULT_BUFSIZE;
        sk->sk_userlocks |= SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK;
        release_sock(sk);
}
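/* Expose TCP's send-side sequence state: snd_nxt tags the stream position of
 * outgoing messages, and snd_una (how much the peer has acked) lets the send
 * path tell when a previously queued message has been fully delivered.
 */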
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_nxt;
}

u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_una;
}
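/* Undo rds_tcp_set_callbacks(): take the socket off the info-export list,
 * clear tc->t_sock, and restore the socket's original sk_* callbacks.
 */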
void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc)
{
        rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_del_init(&tc->t_list_item);
        rds_tcp_tc_count--;
        spin_unlock(&rds_tcp_tc_list_lock);

        tc->t_sock = NULL;

        sock->sk->sk_write_space = tc->t_orig_write_space;
        sock->sk->sk_data_ready = tc->t_orig_data_ready;
        sock->sk->sk_state_change = tc->t_orig_state_change;
        sock->sk->sk_user_data = NULL;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}
/*
 * This is the only path that sets tc->t_sock.  Send and receive trust that
 * it is set.  The RDS_CONN_CONNECTED bit protects those paths from being
 * called while it isn't set.
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_connection *conn)
{
        struct rds_tcp_connection *tc = conn->c_transport_data;

        rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
        rds_tcp_tc_count++;
        spin_unlock(&rds_tcp_tc_list_lock);

        /* accepted sockets need our listen data ready undone */
        if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
                sock->sk->sk_data_ready = sock->sk->sk_user_data;

        tc->t_sock = sock;
        tc->conn = conn;
        tc->t_orig_data_ready = sock->sk->sk_data_ready;
        tc->t_orig_write_space = sock->sk->sk_write_space;
        tc->t_orig_state_change = sock->sk->sk_state_change;

        sock->sk->sk_user_data = conn;
        sock->sk->sk_data_ready = rds_tcp_data_ready;
        sock->sk->sk_write_space = rds_tcp_write_space;
        sock->sk->sk_state_change = rds_tcp_state_change;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}
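/* Walk rds_tcp_tc_list and copy one rds_info_tcp_socket entry per connection
 * to userspace; registered below for the RDS_INFO_TCP_SOCKETS info request.
 */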
static void rds_tcp_tc_info(struct socket *sock, unsigned int len,
                            struct rds_info_iterator *iter,
                            struct rds_info_lengths *lens)
{
        struct rds_info_tcp_socket tsinfo;
        struct rds_tcp_connection *tc;
        unsigned long flags;
        struct sockaddr_in sin;
        int sinlen;

        spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

        if (len / sizeof(tsinfo) < rds_tcp_tc_count)
                goto out;

        list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {

                sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 0);
                tsinfo.local_addr = sin.sin_addr.s_addr;
                tsinfo.local_port = sin.sin_port;
                sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 1);
                tsinfo.peer_addr = sin.sin_addr.s_addr;
                tsinfo.peer_port = sin.sin_port;

                tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
                tsinfo.data_rem = tc->t_tinc_data_rem;
                tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
                tsinfo.last_expected_una = tc->t_last_expected_una;
                tsinfo.last_seen_una = tc->t_last_seen_una;

                rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
        }

out:
        lens->nr = rds_tcp_tc_count;
        lens->each = sizeof(tsinfo);

        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
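/* RDS/TCP can only bind to an address that is local to this host */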
static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
        if (inet_addr_type(net, addr) == RTN_LOCAL)
                return 0;
        return -EADDRNOTAVAIL;
}
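/* Allocate the per-connection transport state from the slab and link it on
 * rds_tcp_conn_list so module unload and netns teardown can find it later.
 */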
static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_tcp_connection *tc;

        tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
        if (!tc)
                return -ENOMEM;

        tc->t_sock = NULL;
        tc->t_tinc = NULL;
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
        tc->t_tinc_data_rem = 0;

        conn->c_transport_data = tc;

        spin_lock_irq(&rds_tcp_conn_lock);
        list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
        spin_unlock_irq(&rds_tcp_conn_lock);

        rdsdebug("alloced tc %p\n", conn->c_transport_data);
        return 0;
}
static void rds_tcp_conn_free(void *arg)
{
        struct rds_tcp_connection *tc = arg;
        unsigned long flags;

        rdsdebug("freeing tc %p\n", tc);

        spin_lock_irqsave(&rds_tcp_conn_lock, flags);
        list_del(&tc->t_tcp_node);
        spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

        kmem_cache_free(rds_tcp_conn_slab, tc);
}
static void rds_tcp_destroy_conns(void)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&rds_tcp_conn_lock);
        list_splice(&rds_tcp_conn_list, &tmp_list);
        INIT_LIST_HEAD(&rds_tcp_conn_list);
        spin_unlock_irq(&rds_tcp_conn_lock);

        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
                if (tc->conn->c_passive)
                        rds_conn_destroy(tc->conn->c_passive);
                rds_conn_destroy(tc->conn);
        }
}
static void rds_tcp_exit(void);

struct rds_transport rds_tcp_transport = {
        .laddr_check            = rds_tcp_laddr_check,
        .xmit_prepare           = rds_tcp_xmit_prepare,
        .xmit_complete          = rds_tcp_xmit_complete,
        .xmit                   = rds_tcp_xmit,
        .recv                   = rds_tcp_recv,
        .conn_alloc             = rds_tcp_conn_alloc,
        .conn_free              = rds_tcp_conn_free,
        .conn_connect           = rds_tcp_conn_connect,
        .conn_shutdown          = rds_tcp_conn_shutdown,
        .inc_copy_to_user       = rds_tcp_inc_copy_to_user,
        .inc_free               = rds_tcp_inc_free,
        .stats_info_copy        = rds_tcp_stats_info_copy,
        .exit                   = rds_tcp_exit,
        .t_owner                = THIS_MODULE,
        .t_name                 = "tcp",
        .t_type                 = RDS_TRANS_TCP,
        .t_prefer_loopback      = 1,
};
static int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
        struct socket *rds_tcp_listen_sock;
        struct work_struct rds_tcp_accept_w;
};
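/* drain the listen socket's accept queue, one connection at a time */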
static void rds_tcp_accept_worker(struct work_struct *work)
{
        struct rds_tcp_net *rtn = container_of(work,
                                               struct rds_tcp_net,
                                               rds_tcp_accept_w);

        while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
                cond_resched();
}
void rds_tcp_accept_work(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}
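/* pernet init: create this namespace's listen socket; the accept worker is
 * queued from the listen socket's data_ready path via rds_tcp_accept_work().
 */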
static __net_init int rds_tcp_init_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
        if (!rtn->rds_tcp_listen_sock) {
                pr_warn("could not set up listen sock\n");
                return -EAFNOSUPPORT;
        }
        INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
        return 0;
}
static void __net_exit rds_tcp_exit_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        /* If rds_tcp_exit_net() is called as a result of netns deletion,
         * the rds_tcp_kill_sock() device notifier would already have
         * cleaned up the listen socket, thus there is no work to do in
         * this function.
         *
         * If rds_tcp_exit_net() is called as a result of module unload,
         * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
         * we do need to clean up the listen socket here.
         */
        if (rtn->rds_tcp_listen_sock) {
                rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
                rtn->rds_tcp_listen_sock = NULL;
                flush_work(&rtn->rds_tcp_accept_w);
        }
}
static struct pernet_operations rds_tcp_net_ops = {
        .init = rds_tcp_init_net,
        .exit = rds_tcp_exit_net,
        .id = &rds_tcp_netid,
        .size = sizeof(struct rds_tcp_net),
};
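/* Shut down this netns's listen socket and accept worker, then disconnect
 * and destroy every RDS/TCP connection that belongs to the dying namespace.
 */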
static void rds_tcp_kill_sock(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;
        struct sock *sk;
        LIST_HEAD(tmp_list);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
        rtn->rds_tcp_listen_sock = NULL;
        flush_work(&rtn->rds_tcp_accept_w);
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;
                list_move_tail(&tc->t_tcp_node, &tmp_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
                sk = tc->t_sock->sk;
                sk->sk_prot->disconnect(sk, 0);
                tcp_done(sk);
                if (tc->conn->c_passive)
                        rds_conn_destroy(tc->conn->c_passive);
                rds_conn_destroy(tc->conn);
        }
}
static int rds_tcp_dev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* rds-tcp registers as a pernet subsys, so the ->exit will only
         * get invoked after network activity has quiesced. We need to
         * clean up all sockets to quiesce network activity, and use
         * the unregistration of the per-net loopback device as a trigger
         * to start that cleanup.
         */
        if (event == NETDEV_UNREGISTER_FINAL &&
            dev->ifindex == LOOPBACK_IFINDEX)
                rds_tcp_kill_sock(dev_net(dev));

        return NOTIFY_DONE;
}
static struct notifier_block rds_tcp_dev_notifier = {
        .notifier_call = rds_tcp_dev_event,
        .priority = -10, /* must be called after other network notifiers */
};
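/* module unload: unwind everything rds_tcp_init() set up */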
static void rds_tcp_exit(void)
{
        rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
        unregister_pernet_subsys(&rds_tcp_net_ops);
        if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
                pr_warn("could not unregister rds_tcp_dev_notifier\n");
        rds_tcp_destroy_conns();
        rds_trans_unregister(&rds_tcp_transport);
        rds_tcp_recv_exit();
        kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);
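/* module init: create the connection slab, then register the netdev
 * notifier, pernet subsys, receive path, and transport, and finally export
 * socket info; error paths unwind in reverse.
 */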
static int rds_tcp_init(void)
{
        int ret;

        rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
                                              sizeof(struct rds_tcp_connection),
                                              0, 0, NULL);
        if (!rds_tcp_conn_slab) {
                ret = -ENOMEM;
                goto out;
        }

        ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
        if (ret) {
                pr_warn("could not register rds_tcp_dev_notifier\n");
                goto out;
        }

        ret = register_pernet_subsys(&rds_tcp_net_ops);
        if (ret)
                goto out_slab;

        ret = rds_tcp_recv_init();
        if (ret)
                goto out_slab;

        ret = rds_trans_register(&rds_tcp_transport);
        if (ret)
                goto out_recv;

        rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

        goto out;

out_recv:
        rds_tcp_recv_exit();
out_slab:
        unregister_pernet_subsys(&rds_tcp_net_ops);
        kmem_cache_destroy(rds_tcp_conn_slab);
out:
        return ret;
}
module_init(rds_tcp_init);
MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");