/*
 * net/tipc/server.c: TIPC server infrastructure
 *
 * Copyright (c) 2012-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "server.h"
#include "core.h"
#include "socket.h"
#include <net/sock.h>
#include <linux/module.h>

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT	25
#define MAX_RECV_MSG_COUNT	25
#define CF_CONNECTED		1
#define CF_SERVER		2

#define sock2con(x) ((struct tipc_conn *)(x)->sk_user_data)

/**
 * struct tipc_conn - TIPC connection structure
 * @kref: reference counter to connection object
 * @conid: connection identifier
 * @sock: socket handler associated with connection
 * @flags: indicates connection state
 * @server: pointer to connected server
 * @rwork: receive work item
 * @rx_action: what to do when connection socket is active
 * @usr_data: user-specified field
 * @outqueue: pointer to first outbound message in queue
 * @outqueue_lock: control access to the outqueue
 * @swork: send work item
 */
struct tipc_conn {
	struct kref kref;
	int conid;
	struct socket *sock;
	unsigned long flags;
	struct tipc_server *server;
	struct work_struct rwork;
	int (*rx_action) (struct tipc_conn *con);
	void *usr_data;
	struct list_head outqueue;
	spinlock_t outqueue_lock;
	struct work_struct swork;
};

/* An entry waiting to be sent */
struct outqueue_entry {
	struct list_head list;
	struct kvec iov;
	struct sockaddr_tipc dest;
};

static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);
static void tipc_clean_outqueues(struct tipc_conn *con);

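/* Last reference to the connection is gone: if a socket is still attached,
 * re-bind the server address with a negated scope (withdrawing its name
 * publication), restore the TIPC module references dropped for a listener
 * socket, and release the socket before freeing the connection object.
 */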
static void tipc_conn_kref_release(struct kref *kref)
{
	struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
	struct sockaddr_tipc *saddr = con->server->saddr;
	struct socket *sock = con->sock;
	struct sock *sk;

	if (sock) {
		sk = sock->sk;
		if (test_bit(CF_SERVER, &con->flags)) {
			__module_get(sock->ops->owner);
			__module_get(sk->sk_prot_creator->owner);
		}
		saddr->scope = -TIPC_NODE_SCOPE;
		kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
		sock_release(sock);
		con->sock = NULL;
	}

	tipc_clean_outqueues(con);
	kfree(con);
}

static void conn_put(struct tipc_conn *con)
{
	kref_put(&con->kref, tipc_conn_kref_release);
}

static void conn_get(struct tipc_conn *con)
{
	kref_get(&con->kref);
}

static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
{
	struct tipc_conn *con;

	spin_lock_bh(&s->idr_lock);
	con = idr_find(&s->conn_idr, conid);
	if (con)
		conn_get(con);
	spin_unlock_bh(&s->idr_lock);
	return con;
}

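/* sk_data_ready/sk_write_space callbacks: take a reference on the connection
 * and schedule the receive resp. send work item; the extra reference is
 * dropped again if the work was already queued.
 */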
static void sock_data_ready(struct sock *sk)
{
	struct tipc_conn *con;

	read_lock(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && test_bit(CF_CONNECTED, &con->flags)) {
		conn_get(con);
		if (!queue_work(con->server->rcv_wq, &con->rwork))
			conn_put(con);
	}
	read_unlock(&sk->sk_callback_lock);
}

static void sock_write_space(struct sock *sk)
{
	struct tipc_conn *con;

	read_lock(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && test_bit(CF_CONNECTED, &con->flags)) {
		conn_get(con);
		if (!queue_work(con->server->send_wq, &con->swork))
			conn_put(con);
	}
	read_unlock(&sk->sk_callback_lock);
}

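/* Attach resp. detach a connection to/from its socket: the socket callbacks
 * are redirected to the handlers above and sk_user_data points back at the
 * connection, all under sk_callback_lock.
 */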
static void tipc_register_callbacks(struct socket *sock, struct tipc_conn *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);

	sk->sk_data_ready = sock_data_ready;
	sk->sk_write_space = sock_write_space;
	sk->sk_user_data = con;

	con->sock = sock;

	write_unlock_bh(&sk->sk_callback_lock);
}

static void tipc_unregister_callbacks(struct tipc_conn *con)
{
	struct sock *sk = con->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

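/* Tear down a connection: invoke the tipc_conn_shutdown() callback for
 * connections with a non-zero conid, remove the connection from the idr,
 * detach it from its socket and shut the socket down, then drop the
 * reference held since allocation.
 */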
static void tipc_close_conn(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;

	if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
		if (con->conid)
			s->tipc_conn_shutdown(con->conid, con->usr_data);

		spin_lock_bh(&s->idr_lock);
		idr_remove(&s->conn_idr, con->conid);
		s->idr_in_use--;
		spin_unlock_bh(&s->idr_lock);

		tipc_unregister_callbacks(con);

		/* We shouldn't flush pending works, as we may be in the
		 * workqueue thread ourselves. The races with pending rx/tx
		 * work items are harmless here, since we have already removed
		 * this connection from the server's connection list and set
		 * sk->sk_user_data to NULL before releasing the connection
		 * object.
		 */
		kernel_sock_shutdown(con->sock, SHUT_RDWR);

		conn_put(con);
	}
}

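/* Allocate a connection object, insert it into the server's connection idr
 * (the idr index becomes the connection id) and mark it connected. The
 * initial kref is dropped by tipc_close_conn().
 */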
static struct tipc_conn *tipc_alloc_conn(struct tipc_server *s)
{
	struct tipc_conn *con;
	int ret;

	con = kzalloc(sizeof(struct tipc_conn), GFP_ATOMIC);
	if (!con)
		return ERR_PTR(-ENOMEM);

	kref_init(&con->kref);
	INIT_LIST_HEAD(&con->outqueue);
	spin_lock_init(&con->outqueue_lock);
	INIT_WORK(&con->swork, tipc_send_work);
	INIT_WORK(&con->rwork, tipc_recv_work);

	spin_lock_bh(&s->idr_lock);
	ret = idr_alloc(&s->conn_idr, con, 0, 0, GFP_ATOMIC);
	if (ret < 0) {
		kfree(con);
		spin_unlock_bh(&s->idr_lock);
		return ERR_PTR(-ENOMEM);
	}
	con->conid = ret;
	s->idr_in_use++;
	spin_unlock_bh(&s->idr_lock);

	set_bit(CF_CONNECTED, &con->flags);
	con->server = s;

	return con;
}

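/* Receive one message from the connection socket into a buffer taken from the
 * server's receive-buffer cache and hand it to the tipc_conn_recvmsg()
 * callback. The connection is closed on any error other than -EWOULDBLOCK.
 */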
static int tipc_receive_from_sock(struct tipc_conn *con)
{
	struct msghdr msg = {};
	struct tipc_server *s = con->server;
	struct sockaddr_tipc addr;
	struct kvec iov;
	void *buf;
	int ret;

	buf = kmem_cache_alloc(s->rcvbuf_cache, GFP_ATOMIC);
	if (!buf) {
		ret = -ENOMEM;
		goto out_close;
	}

	iov.iov_base = buf;
	iov.iov_len = s->max_rcvbuf_size;
	msg.msg_name = &addr;
	ret = kernel_recvmsg(con->sock, &msg, &iov, 1, iov.iov_len,
			     MSG_DONTWAIT);
	if (ret <= 0) {
		kmem_cache_free(s->rcvbuf_cache, buf);
		goto out_close;
	}

	s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
			     con->usr_data, buf, ret);

	kmem_cache_free(s->rcvbuf_cache, buf);

	return 0;

out_close:
	if (ret != -EWOULDBLOCK)
		tipc_close_conn(con);
	if (ret == 0)
		/* Don't return success if we really got EOF */
		ret = -EAGAIN;

	return ret;
}

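/* Accept a pending connection on the listening socket, allocate a connection
 * object for it, hook up the socket callbacks and notify the server user via
 * tipc_conn_new(). A final data_ready kick covers data that may already be
 * queued (a 'SYN+' message).
 */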
static int tipc_accept_from_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = con->sock;
	struct socket *newsock;
	struct tipc_conn *newcon;
	int ret;

	ret = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (ret < 0)
		return ret;

	newcon = tipc_alloc_conn(con->server);
	if (IS_ERR(newcon)) {
		ret = PTR_ERR(newcon);
		sock_release(newsock);
		return ret;
	}

	newcon->rx_action = tipc_receive_from_sock;
	tipc_register_callbacks(newsock, newcon);

	/* Notify that new connection is incoming */
	newcon->usr_data = s->tipc_conn_new(newcon->conid);
	if (!newcon->usr_data) {
		sock_release(newsock);
		return -ENOMEM;
	}

	/* Wake up receive process in case of 'SYN+' message */
	newsock->sk->sk_data_ready(newsock->sk);
	return ret;
}

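/* Create and bind the server socket. Stream/seqpacket servers listen and
 * accept peers (rx_action = tipc_accept_from_sock); dgram/rdm servers receive
 * messages directly (rx_action = tipc_receive_from_sock).
 */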
static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
{
	struct tipc_server *s = con->server;
	struct socket *sock = NULL;
	int ret;

	ret = sock_create_kern(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock);
	if (ret < 0)
		return NULL;
	ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
				(char *)&s->imp, sizeof(s->imp));
	if (ret < 0)
		goto create_err;
	ret = kernel_bind(sock, (struct sockaddr *)s->saddr, sizeof(*s->saddr));
	if (ret < 0)
		goto create_err;

	switch (s->type) {
	case SOCK_STREAM:
	case SOCK_SEQPACKET:
		con->rx_action = tipc_accept_from_sock;

		ret = kernel_listen(sock, 0);
		if (ret < 0)
			goto create_err;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		con->rx_action = tipc_receive_from_sock;
		break;
	default:
		pr_err("Unknown socket type %d\n", s->type);
		goto create_err;
	}

	/* The listening socket's owner and creator are the same module (TIPC
	 * itself), so we must drop the module reference count here to keep it
	 * at zero after the server socket has been created; otherwise "rmmod"
	 * could never remove the TIPC module once it had been loaded.
	 *
	 * sock_create_kern() takes the reference count twice: once for the
	 * owner of the TIPC socket's proto_ops struct and once for the owner
	 * of the TIPC proto struct. We therefore drop the module reference
	 * count twice to keep it at zero while the listening socket exists,
	 * and of course take it twice again before the socket is finally
	 * closed.
	 */
	module_put(sock->ops->owner);
	module_put(sock->sk->sk_prot_creator->owner);
	set_bit(CF_SERVER, &con->flags);

	return sock;

create_err:
	kernel_sock_shutdown(sock, SHUT_RDWR);
	sock_release(sock);
	return NULL;
}

static int tipc_open_listening_sock(struct tipc_server *s)
{
	struct socket *sock;
	struct tipc_conn *con;

	con = tipc_alloc_conn(s);
	if (IS_ERR(con))
		return PTR_ERR(con);

	sock = tipc_create_listen_sock(con);
	if (!sock) {
		idr_remove(&s->conn_idr, con->conid);
		s->idr_in_use--;
		kfree(con);
		return -EINVAL;
	}

	tipc_register_callbacks(sock, con);
	return 0;
}

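/* Outqueue entries carry a private copy of the caller's data, so the caller's
 * buffer does not need to stay valid after tipc_conn_sendmsg() returns.
 */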
static struct outqueue_entry *tipc_alloc_entry(void *data, int len)
{
	struct outqueue_entry *entry;
	void *buf;

	entry = kmalloc(sizeof(struct outqueue_entry), GFP_ATOMIC);
	if (!entry)
		return NULL;

	buf = kmalloc(len, GFP_ATOMIC);
	if (!buf) {
		kfree(entry);
		return NULL;
	}

	memcpy(buf, data, len);
	entry->iov.iov_base = buf;
	entry->iov.iov_len = len;

	return entry;
}

static void tipc_free_entry(struct outqueue_entry *e)
{
	kfree(e->iov.iov_base);
	kfree(e);
}

static void tipc_clean_outqueues(struct tipc_conn *con)
{
	struct outqueue_entry *e, *safe;

	spin_lock_bh(&con->outqueue_lock);
	list_for_each_entry_safe(e, safe, &con->outqueue, list) {
		list_del(&e->list);
		tipc_free_entry(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
}

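/* Queue a message for transmission on the given connection and schedule the
 * send work item. For connectionless (dgram/rdm) servers the optional @addr
 * gives the destination of the message.
 */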
int tipc_conn_sendmsg(struct tipc_server *s, int conid,
		      struct sockaddr_tipc *addr, void *data, size_t len)
{
	struct outqueue_entry *e;
	struct tipc_conn *con;

	con = tipc_conn_lookup(s, conid);
	if (!con)
		return -EINVAL;

	e = tipc_alloc_entry(data, len);
	if (!e) {
		conn_put(con);
		return -ENOMEM;
	}

	if (addr)
		memcpy(&e->dest, addr, sizeof(struct sockaddr_tipc));

	spin_lock_bh(&con->outqueue_lock);
	list_add_tail(&e->list, &con->outqueue);
	spin_unlock_bh(&con->outqueue_lock);

	if (test_bit(CF_CONNECTED, &con->flags)) {
		if (!queue_work(s->send_wq, &con->swork))
			conn_put(con);
	} else {
		conn_put(con);
	}
	return 0;
}

void tipc_conn_terminate(struct tipc_server *s, int conid)
{
	struct tipc_conn *con;

	con = tipc_conn_lookup(s, conid);
	if (con) {
		tipc_close_conn(con);
		conn_put(con);
	}
}

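/* Drain the connection's outqueue: send entries one by one with MSG_DONTWAIT,
 * rescheduling after MAX_SEND_MSG_COUNT messages so other users are not
 * starved, and close the connection on a hard send error.
 */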
static void tipc_send_to_sock(struct tipc_conn *con)
{
	int count = 0;
	struct tipc_server *s = con->server;
	struct outqueue_entry *e;
	struct msghdr msg;
	int ret;

	spin_lock_bh(&con->outqueue_lock);
	while (1) {
		e = list_entry(con->outqueue.next, struct outqueue_entry,
			       list);
		if ((struct list_head *) e == &con->outqueue)
			break;
		spin_unlock_bh(&con->outqueue_lock);

		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_DONTWAIT;

		if (s->type == SOCK_DGRAM || s->type == SOCK_RDM) {
			msg.msg_name = &e->dest;
			msg.msg_namelen = sizeof(struct sockaddr_tipc);
		}
		ret = kernel_sendmsg(con->sock, &msg, &e->iov, 1,
				     e->iov.iov_len);
		if (ret == -EWOULDBLOCK || ret == 0) {
			cond_resched();
			goto out;
		} else if (ret < 0) {
			goto send_err;
		}

		/* Don't starve users filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock_bh(&con->outqueue_lock);
		list_del(&e->list);
		tipc_free_entry(e);
	}
	spin_unlock_bh(&con->outqueue_lock);
out:
	return;

send_err:
	tipc_close_conn(con);
}

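/* Work-queue handlers: tipc_recv_work() keeps invoking the connection's
 * rx_action (accept or receive) while the connection stays connected,
 * yielding every MAX_RECV_MSG_COUNT messages; tipc_send_work() flushes the
 * outqueue. Both drop the reference taken when the work was queued.
 */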
static void tipc_recv_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);
	int count = 0;

	while (test_bit(CF_CONNECTED, &con->flags)) {
		if (con->rx_action(con))
			break;

		/* Don't flood Rx machine */
		if (++count >= MAX_RECV_MSG_COUNT) {
			cond_resched();
			count = 0;
		}
	}
	conn_put(con);
}

static void tipc_send_work(struct work_struct *work)
{
	struct tipc_conn *con = container_of(work, struct tipc_conn, swork);

	if (test_bit(CF_CONNECTED, &con->flags))
		tipc_send_to_sock(con);

	conn_put(con);
}

static void tipc_work_stop(struct tipc_server *s)
{
	destroy_workqueue(s->rcv_wq);
	destroy_workqueue(s->send_wq);
}

static int tipc_work_start(struct tipc_server *s)
{
	s->rcv_wq = alloc_ordered_workqueue("tipc_rcv", 0);
	if (!s->rcv_wq) {
		pr_err("can't start tipc receive workqueue\n");
		return -ENOMEM;
	}

	s->send_wq = alloc_ordered_workqueue("tipc_send", 0);
	if (!s->send_wq) {
		pr_err("can't start tipc send workqueue\n");
		destroy_workqueue(s->rcv_wq);
		return -ENOMEM;
	}

	return 0;
}

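/* Bring the server up: initialize the connection idr, create the receive
 * buffer cache and the ordered work queues, and open the listening socket.
 * Each step is rolled back if a later one fails.
 */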
int tipc_server_start(struct tipc_server *s)
{
	int ret;

	spin_lock_init(&s->idr_lock);
	idr_init(&s->conn_idr);
	s->idr_in_use = 0;

	s->rcvbuf_cache = kmem_cache_create(s->name, s->max_rcvbuf_size,
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!s->rcvbuf_cache)
		return -ENOMEM;

	ret = tipc_work_start(s);
	if (ret < 0) {
		kmem_cache_destroy(s->rcvbuf_cache);
		return ret;
	}
	ret = tipc_open_listening_sock(s);
	if (ret < 0) {
		tipc_work_stop(s);
		kmem_cache_destroy(s->rcvbuf_cache);
		return ret;
	}
	return ret;
}

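/* Shut the server down: close every remaining connection (the idr lock is
 * dropped around tipc_close_conn(), which may sleep), then destroy the work
 * queues, the receive buffer cache and the connection idr.
 */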
void tipc_server_stop(struct tipc_server *s)
{
	struct tipc_conn *con;
	int total = 0;
	int id;

	spin_lock_bh(&s->idr_lock);
	for (id = 0; total < s->idr_in_use; id++) {
		con = idr_find(&s->conn_idr, id);
		if (con) {
			total++;
			spin_unlock_bh(&s->idr_lock);
			tipc_close_conn(con);
			spin_lock_bh(&s->idr_lock);
		}
	}
	spin_unlock_bh(&s->idr_lock);

	tipc_work_stop(s);
	kmem_cache_destroy(s->rcvbuf_cache);
	idr_destroy(&s->conn_idr);
}