/*
 * net/tipc/server.c: TIPC server infrastructure
 *
 * Copyright (c) 2012-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "server.h"
#include "core.h"
#include <net/sock.h>

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT	25
#define MAX_RECV_MSG_COUNT	25
#define CF_CONNECTED		1

#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)

/**
 * struct tipc_conn - TIPC connection structure
 * @kref: reference counter to connection object
 * @conid: connection identifier
 * @sock: socket handler associated with connection
 * @flags: indicates connection state
 * @server: pointer to connected server
 * @rwork: receive work item
 * @usr_data: user-specified field
 * @rx_action: what to do when connection socket is active
 * @outqueue: pointer to first outbound message in queue
 * @outqueue_lock: control access to the outqueue
 * @swork: send work item
 */
struct tipc_conn {
	struct kref kref;
	int conid;
	struct socket *sock;
	unsigned long flags;
	struct tipc_server *server;
	struct work_struct rwork;
	int (*rx_action) (struct tipc_conn *con);
	void *usr_data;
	struct list_head outqueue;
	spinlock_t outqueue_lock;
	struct work_struct swork;
};

/* An entry waiting to be sent */
struct outqueue_entry {
	struct list_head list;
	struct kvec iov;
	struct sockaddr_tipc dest;
};

static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);
static void tipc_clean_outqueues(struct tipc_conn *con);

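/* Final release of a connection, called when the last reference is dropped:
 * close the local socket, if any, free all queued outbound entries and then
 * the connection object itself.
 */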
static void tipc_conn_kref_release(struct kref *kref)
{
	struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);

	if (con->sock) {
		tipc_sock_release_local(con->sock);
		con->sock = NULL;
	}

	tipc_clean_outqueues(con);
	kfree(con);
}

static void conn_put(struct tipc_conn *con)
{
	kref_put(&con->kref, tipc_conn_kref_release);
}

static void conn_get(struct tipc_conn *con)
{
	kref_get(&con->kref);
}

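/* Look up a connection by its identifier under the server's idr lock and
 * take a reference on it; the caller must release it with conn_put().
 */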
static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
{
	struct tipc_conn *con;

	spin_lock_bh(&s->idr_lock);
	con = idr_find(&s->conn_idr, conid);
	if (con)
		conn_get(con);
	spin_unlock_bh(&s->idr_lock);
	return con;
}

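/* Socket data-ready callback: schedule the connection's receive work item
 * on the server's receive workqueue, holding an extra reference that the
 * work function releases when it is done.
 */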
static void sock_data_ready(struct sock *sk, int unused)
{
	struct tipc_conn *con;

	read_lock(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && test_bit(CF_CONNECTED, &con->flags)) {
		conn_get(con);
		if (!queue_work(con->server->rcv_wq, &con->rwork))
			conn_put(con);
	}
	read_unlock(&sk->sk_callback_lock);
}

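/* Socket write-space callback: the socket can accept more data again, so
 * schedule the connection's send work item to drain the outqueue.
 */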
static void sock_write_space(struct sock *sk)
{
	struct tipc_conn *con;

	read_lock(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && test_bit(CF_CONNECTED, &con->flags)) {
		conn_get(con);
		if (!queue_work(con->server->send_wq, &con->swork))
			conn_put(con);
	}
	read_unlock(&sk->sk_callback_lock);
}

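/* Attach a socket to a connection: override its data-ready and write-space
 * callbacks and point sk_user_data back at the connection object.
 */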
static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);

	sk->sk_data_ready = sock_data_ready;
	sk->sk_write_space = sock_write_space;
	sk->sk_user_data = con;

	con->sock = sock;

	write_unlock_bh(&sk->sk_callback_lock);
}

static void tipc_unregister_callbacks(struct tipc_conn *con)
{
	struct sock *sk = con->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

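/* Tear down a connection. For accepted connections (conid != 0) the server's
 * shutdown callback is invoked first; the connection is then removed from
 * the idr, detached from its socket, shut down and its initial reference
 * dropped.
 */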
static void tipc_close_conn(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;

	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
		if (con->conid)
			s->tipc_conn_shutdown(con->conid, con->usr_data);

		spin_lock_bh(&s->idr_lock);
		idr_remove(&s->conn_idr, con->conid);
		s->idr_in_use--;
		spin_unlock_bh(&s->idr_lock);

		tipc_unregister_callbacks(con);

		/* We must not flush pending work items here, since we may be
		 * running from one of those work threads ourselves. Any races
		 * with pending rx/tx work structs are harmless at this point:
		 * the connection has already been removed from the server's
		 * connection list and sk->sk_user_data has been cleared
		 * before the connection object is released.
		 */
		kernel_sock_shutdown(con->sock, SHUT_RDWR);

		conn_put(con);
	}
}

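/* Allocate and initialise a connection object and register it in the
 * server's idr. The returned connection carries one reference, which is
 * released when tipc_close_conn() puts it.
 */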
static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
{
	struct tipc_conn *con;
	int ret;

	con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
	if (!con)
		return ERR_PTR(-ENOMEM);

	kref_init(&con->kref);
	INIT_LIST_HEAD(&con->outqueue);
	spin_lock_init(&con->outqueue_lock);
	INIT_WORK(&con->swork, tipc_send_work);
	INIT_WORK(&con->rwork, tipc_recv_work);

	spin_lock_bh(&s->idr_lock);
	ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
	if (ret < 0) {
		kfree(con);
		spin_unlock_bh(&s->idr_lock);
		return ERR_PTR(-ENOMEM);
	}
	con->conid = ret;
	s->idr_in_use++;
	spin_unlock_bh(&s->idr_lock);

	set_bit(CF_CONNECTED, &con->flags);
	con->server = s;

	return con;
}

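/* Receive one message from the connection's socket into a buffer taken from
 * the server's receive-buffer cache and hand it to the server's receive
 * callback. Returns 0 on success; on error the connection is closed unless
 * the socket merely had no data available.
 */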
static int tipc_receive_from_sock(struct tipc_conn *con)
{
	struct msghdr msg = {};
	struct tipc_server *s = con->server;
	struct sockaddr_tipc addr;
	struct kvec iov;
	void *buf;
	int ret;

	buf = kmem_cache_alloc(s->rcvbuf_cache, GFP_ATOMIC);
	if (!buf) {
		ret = -ENOMEM;
		goto out_close;
	}

	iov.iov_base = buf;
	iov.iov_len = s->max_rcvbuf_size;
	msg.msg_name = &addr;
	ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
			     MSG_DONTWAIT);
	if (ret <= 0) {
		kmem_cache_free(s->rcvbuf_cache, buf);
		goto out_close;
	}

	s->tipc_conn_recvmsg(con->conid, &addr, con->usr_data, buf, ret);

	kmem_cache_free(s->rcvbuf_cache, buf);

	return 0;

out_close:
	if (ret != -EWOULDBLOCK)
		tipc_close_conn(con);
	if (ret == 0)
		/* Don't return success if we really got EOF */
		ret = -EAGAIN;

	return ret;
}

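/* Accept a new connection on the listening socket, wrap it in a new
 * tipc_conn and notify the server's new-connection callback. Used as the
 * rx_action of the listening connection.
 */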
static int tipc_accept_from_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = con->sock;
	struct socket *newsock;
	struct tipc_conn *newcon;
	int ret;

	ret = tipc_sock_accept_local(sock, &newsock, O_NONBLOCK);
	if (ret < 0)
		return ret;

	newcon = tipc_alloc_conn(con->server);
	if (IS_ERR(newcon)) {
		ret = PTR_ERR(newcon);
		sock_release(newsock);
		return ret;
	}

	newcon->rx_action = tipc_receive_from_sock;
	tipc_register_callbacks(newsock, newcon);

	/* Notify that new connection is incoming */
	newcon->usr_data = s->tipc_conn_new(newcon->conid);

	/* Wake up receive process in case of 'SYN+' message */
	newsock->sk->sk_data_ready(newsock->sk, 0);
	return ret;
}

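/* Create and bind the server's local TIPC socket and choose the rx_action
 * matching its type: accept for connection-oriented sockets, receive for
 * datagram sockets. Returns the socket, or NULL on failure.
 */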
static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = NULL;
	int ret;

	ret = tipc_sock_create_local(s->type, &sock);
	if (ret < 0)
		return NULL;
	ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
				(char *)&s->imp, sizeof(s->imp));
	if (ret < 0)
		goto create_err;
	ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
	if (ret < 0)
		goto create_err;

	switch (s->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		con->rx_action = tipc_accept_from_sock;

		ret = kernel_listen(sock, 0);
		if (ret < 0)
			goto create_err;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		con->rx_action = tipc_receive_from_sock;
		break;
	default:
		pr_err("Unknown socket type %d\n", s->type);
		goto create_err;
	}
	return sock;

create_err:
	sock_release(sock);
	con->sock = NULL;
	return NULL;
}

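/* Set up the server's listening connection: allocate a connection object,
 * create its local socket and hook up the socket callbacks.
 */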
static int tipc_open_listening_sock(struct tipc_server *s)
{
	struct socket *sock;
	struct tipc_conn *con;

	con = tipc_alloc_conn(s);
	if (IS_ERR(con))
		return PTR_ERR(con);

	sock = tipc_create_listen_sock(con);
	if (!sock) {
		idr_remove(&s->conn_idr, con->conid);
		s->idr_in_use--;
		kfree(con);
		return -EINVAL;
	}

	tipc_register_callbacks(sock, con);
	return 0;
}

static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
{
	struct outqueue_entry *entry;
	void *buf;

	entry = kmalloc(sizeof(struct outqueue_entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	buf = kmalloc(len, GFP_ATOMIC);
	if (!buf) {
		kfree(entry);
		return NULL;
	}

	memcpy(buf, data, len);
	entry->iov.iov_base = buf;
	entry->iov.iov_len = len;

	return entry;
}

static void tipc_free_entry(struct outqueue_entry *e)
{
	kfree(e->iov.iov_base);
	kfree(e);
}

static void tipc_clean_outqueues(struct tipc_conn *con)
{
	struct outqueue_entry *e, *safe;

	spin_lock_bh(&con->outqueue_lock);
	list_for_each_entry_safe(e, safe, &con->outqueue, list) {
		list_del(&e->list);
		tipc_free_entry(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
}

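/* Queue a message for transmission on the identified connection and kick
 * its send work item. The data is copied, so the caller keeps ownership of
 * its buffer.
 */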
int tipc_conn_sendmsg(struct tipc_server *s, int conid,
		      struct sockaddr_tipc *addr, void *data, size_t len)
{
	struct outqueue_entry *e;
	struct tipc_conn *con;

	con = tipc_conn_lookup(s, conid);
	if (!con)
		return -EINVAL;

	e = tipc_alloc_entry(data, len);
	if (!e) {
		conn_put(con);
		return -ENOMEM;
	}

	if (addr)
		memcpy(&e->dest, addr, sizeof(struct sockaddr_tipc));

	spin_lock_bh(&con->outqueue_lock);
	list_add_tail(&e->list, &con->outqueue);
	spin_unlock_bh(&con->outqueue_lock);

	if (test_bit(CF_CONNECTED, &con->flags)) {
		if (!queue_work(s->send_wq, &con->swork))
			conn_put(con);
	} else {
		conn_put(con);
	}

	return 0;
}

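/* Close the identified connection on behalf of the server user, if it still
 * exists.
 */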
void tipc_conn_terminate(struct tipc_server *s, int conid)
{
	struct tipc_conn *con;

	con = tipc_conn_lookup(s, conid);
	if (con) {
		tipc_close_conn(con);
		conn_put(con);
	}
}

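/* Drain the connection's outqueue, sending each queued entry over its
 * socket. Transmission stops early if the socket would block, and the
 * connection is closed on any other send error.
 */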
static void tipc_send_to_sock(struct tipc_conn *con)
{
	int count = 0;
	struct tipc_server *s = con->server;
	struct outqueue_entry *e;
	struct msghdr msg;
	int ret;

	spin_lock_bh(&con->outqueue_lock);
	while (1) {
		e = list_entry(con->outqueue.next, struct outqueue_entry,
			       list);
		if ((struct list_head *) e == &con->outqueue)
			break;
		spin_unlock_bh(&con->outqueue_lock);

		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_DONTWAIT;

		if (s->type == SOCK_DGRAM || s->type == SOCK_RDM) {
			msg.msg_name = &e->dest;
			msg.msg_namelen = sizeof(struct sockaddr_tipc);
		}
		ret = kernel_sendmsg(con->sock, &msg, &e->iov, 1,
				     e->iov.iov_len);
		if (ret == -EWOULDBLOCK || ret == 0) {
			cond_resched();
			goto out;
		} else if (ret < 0) {
			goto send_err;
		}

		/* Don't starve users filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock_bh(&con->outqueue_lock);
		list_del(&e->list);
		tipc_free_entry(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
out:
	return;

send_err:
	tipc_close_conn(con);
}

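/* Receive work item: run the connection's rx_action (accept or receive)
 * repeatedly while the connection stays up, yielding periodically so the
 * workqueue is not monopolised, then drop the reference taken when the work
 * was queued.
 */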
static void tipc_recv_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
	int count = 0;

	while (test_bit(CF_CONNECTED, &con->flags)) {
		if (con->rx_action(con))
			break;

		/* Don't flood Rx machine */
		if (++count >= MAX_RECV_MSG_COUNT) {
			cond_resched();
			count = 0;
		}
	}
	conn_put(con);
}

static void tipc_send_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, swork);

	if (test_bit(CF_CONNECTED, &con->flags))
		tipc_send_to_sock(con);

	conn_put(con);
}

static void tipc_work_stop(struct tipc_server *s)
{
	destroy_workqueue(s->rcv_wq);
	destroy_workqueue(s->send_wq);
}

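/* Create the unbound receive and send workqueues used by the server, each
 * limited to one active work item at a time.
 */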
static int tipc_work_start(struct tipc_server *s)
{
	s->rcv_wq = alloc_workqueue("tipc_rcv", WQ_UNBOUND, 1);
	if (!s->rcv_wq) {
		pr_err("can't start tipc receive workqueue\n");
		return -ENOMEM;
	}

	s->send_wq = alloc_workqueue("tipc_send", WQ_UNBOUND, 1);
	if (!s->send_wq) {
		pr_err("can't start tipc send workqueue\n");
		destroy_workqueue(s->rcv_wq);
		return -ENOMEM;
	}

	return 0;
}

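/* Start a TIPC server instance: initialise the connection idr, create the
 * receive-buffer cache and the workqueues, and open the listening socket.
 * Each step undoes the previous ones on failure.
 */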
int tipc_server_start(struct tipc_server *s)
{
	int ret;

	spin_lock_init(&s->idr_lock);
	idr_init(&s->conn_idr);
	s->idr_in_use = 0;

	s->rcvbuf_cache = kmem_cache_create(s->name, s->max_rcvbuf_size,
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!s->rcvbuf_cache)
		return -ENOMEM;

	ret = tipc_work_start(s);
	if (ret < 0) {
		kmem_cache_destroy(s->rcvbuf_cache);
		return ret;
	}
	ret = tipc_open_listening_sock(s);
	if (ret < 0) {
		tipc_work_stop(s);
		kmem_cache_destroy(s->rcvbuf_cache);
		return ret;
	}
	return ret;
}

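/* Stop a TIPC server instance: close every remaining connection, then tear
 * down the workqueues, the receive-buffer cache and the connection idr.
 */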
void tipc_server_stop(struct tipc_server *s)
{
	struct tipc_conn *con;
	int total = 0;
	int id;

	spin_lock_bh(&s->idr_lock);
	for (id = 0; total < s->idr_in_use; id++) {
		con = idr_find(&s->conn_idr, id);
		if (con) {
			total++;
			spin_unlock_bh(&s->idr_lock);
			tipc_close_conn(con);
			spin_lock_bh(&s->idr_lock);
		}
	}
	spin_unlock_bh(&s->idr_lock);

	tipc_work_stop(s);
	kmem_cache_destroy(s->rcvbuf_cache);
	idr_destroy(&s->conn_idr);
}