/* incoming call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/circ_buf.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

/*
 * Preallocate a single service call, connection and peer and, if possible,
 * give them a user ID and attach the user's side of the ID to them.
 */
static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
                                      struct rxrpc_backlog *b,
                                      rxrpc_notify_rx_t notify_rx,
                                      rxrpc_user_attach_call_t user_attach_call,
                                      unsigned long user_call_ID, gfp_t gfp)
{
        const void *here = __builtin_return_address(0);
        struct rxrpc_call *call;
        int max, tmp;
        unsigned int size = RXRPC_BACKLOG_MAX;
        unsigned int head, tail, call_head, call_tail;

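        /* The backlog consists of three parallel rings (peers, conns and
         * calls), each RXRPC_BACKLOG_MAX entries long.  This function is the
         * producer: it publishes an entry by advancing the head pointer with
         * smp_store_release(); the incoming-call path consumes entries by
         * advancing the tail.  Heads are read on the consumer side with
         * smp_load_acquire() and tails here with READ_ONCE(), the usual
         * <linux/circ_buf.h> single-producer/single-consumer discipline, so
         * no extra locking is needed between the two sides.
         */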
        max = rx->sk.sk_max_ack_backlog;
        tmp = rx->sk.sk_ack_backlog;
        if (tmp >= max) {
                _leave(" = -ENOBUFS [full %u]", max);
                return -ENOBUFS;
        }
        max -= tmp;

        /* We don't need more conns and peers than we have calls, but on the
         * other hand, we shouldn't ever use more peers than conns or conns
         * than calls.
         */
        call_head = b->call_backlog_head;
        call_tail = READ_ONCE(b->call_backlog_tail);
        tmp = CIRC_CNT(call_head, call_tail, size);
        if (tmp >= max) {
                _leave(" = -ENOBUFS [enough %u]", tmp);
                return -ENOBUFS;
        }
        max = tmp + 1;

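        /* After this preallocation there will be tmp + 1 calls queued, so the
         * conn and peer rings must cover that many calls too.  Given the
         * invariant that there are never fewer conns or peers than calls,
         * topping each ring up by at most one entry below is sufficient.
         */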
        head = b->peer_backlog_head;
        tail = READ_ONCE(b->peer_backlog_tail);
        if (CIRC_CNT(head, tail, size) < max) {
                struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
                if (!peer)
                        return -ENOMEM;
                b->peer_backlog[head] = peer;
                smp_store_release(&b->peer_backlog_head,
                                  (head + 1) & (size - 1));
        }

        head = b->conn_backlog_head;
        tail = READ_ONCE(b->conn_backlog_tail);
        if (CIRC_CNT(head, tail, size) < max) {
                struct rxrpc_connection *conn;

                conn = rxrpc_prealloc_service_connection(gfp);
                if (!conn)
                        return -ENOMEM;
                b->conn_backlog[head] = conn;
                smp_store_release(&b->conn_backlog_head,
                                  (head + 1) & (size - 1));

                trace_rxrpc_conn(conn, rxrpc_conn_new_service,
                                 atomic_read(&conn->usage), here);
        }

        /* Now it gets complicated, because calls get registered with the
         * socket here, particularly if a user ID is preassigned by the user.
         */
        call = rxrpc_alloc_call(gfp);
        if (!call)
                return -ENOMEM;
        call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
        call->state = RXRPC_CALL_SERVER_PREALLOC;

        trace_rxrpc_call(call, rxrpc_call_new_service,
                         atomic_read(&call->usage),
                         here, (const void *)user_call_ID);

        write_lock(&rx->call_lock);
        if (user_attach_call) {
                struct rxrpc_call *xcall;
                struct rb_node *parent, **pp;

                /* Check the user ID isn't already in use */
                pp = &rx->calls.rb_node;
                parent = NULL;
                while (*pp) {
                        parent = *pp;
                        xcall = rb_entry(parent, struct rxrpc_call, sock_node);
                        if (user_call_ID < xcall->user_call_ID)
                                pp = &(*pp)->rb_left;
                        else if (user_call_ID > xcall->user_call_ID)
                                pp = &(*pp)->rb_right;
                        else
                                goto id_in_use;
                }

                call->user_call_ID = user_call_ID;
                call->notify_rx = notify_rx;
                rxrpc_get_call(call, rxrpc_call_got_kernel);
                user_attach_call(call, user_call_ID);
                rxrpc_get_call(call, rxrpc_call_got_userid);
                rb_link_node(&call->sock_node, parent, pp);
                rb_insert_color(&call->sock_node, &rx->calls);
                set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
        }

        list_add(&call->sock_link, &rx->sock_calls);

        write_unlock(&rx->call_lock);

        write_lock(&rxrpc_call_lock);
        list_add_tail(&call->link, &rxrpc_calls);
        write_unlock(&rxrpc_call_lock);

        b->call_backlog[call_head] = call;
        smp_store_release(&b->call_backlog_head, (call_head + 1) & (size - 1));
        _leave(" = 0 [%d -> %lx]", call->debug_id, user_call_ID);
        return 0;

id_in_use:
        write_unlock(&rx->call_lock);
        rxrpc_cleanup_call(call);
        _leave(" = -EBADSLT");
        return -EBADSLT;
}

/*
 * Preallocate sufficient service connections, calls and peers to cover the
 * entire backlog of a socket.  When a new call comes in, if we don't have
 * sufficient of each available, the call gets rejected as busy or ignored.
 *
 * The backlog is replenished when a connection is accepted or rejected.
 */
int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp)
{
        struct rxrpc_backlog *b = rx->backlog;

        if (!b) {
                b = kzalloc(sizeof(struct rxrpc_backlog), gfp);
                if (!b)
                        return -ENOMEM;
                rx->backlog = b;
        }

        if (rx->discard_new_call)
                return 0;

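        /* Keep preallocating until rxrpc_service_prealloc_one() reports
         * -ENOBUFS, i.e. until the rings are topped up to the socket's
         * accept-queue limit.
         */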
        while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp) == 0)
                ;

        return 0;
}

/*
 * Discard the preallocation on a service.
 */
void rxrpc_discard_prealloc(struct rxrpc_sock *rx)
{
        struct rxrpc_backlog *b = rx->backlog;
        unsigned int size = RXRPC_BACKLOG_MAX, head, tail;

        if (!b)
                return;
        rx->backlog = NULL;

        /* Make sure that there aren't any incoming calls in progress before we
         * clear the preallocation buffers.  Taking and then dropping the lock
         * waits out any rxrpc_new_incoming_call() that is currently running
         * under it.
         */
        spin_lock_bh(&rx->incoming_lock);
        spin_unlock_bh(&rx->incoming_lock);

        head = b->peer_backlog_head;
        tail = b->peer_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_peer *peer = b->peer_backlog[tail];
                kfree(peer);
                tail = (tail + 1) & (size - 1);
        }

        head = b->conn_backlog_head;
        tail = b->conn_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_connection *conn = b->conn_backlog[tail];
                write_lock(&rxrpc_connection_lock);
                list_del(&conn->link);
                list_del(&conn->proc_link);
                write_unlock(&rxrpc_connection_lock);
                kfree(conn);
                tail = (tail + 1) & (size - 1);
        }

        head = b->call_backlog_head;
        tail = b->call_backlog_tail;
        while (CIRC_CNT(head, tail, size) > 0) {
                struct rxrpc_call *call = b->call_backlog[tail];
                if (rx->discard_new_call) {
                        _debug("discard %lx", call->user_call_ID);
                        rx->discard_new_call(call, call->user_call_ID);
                        rxrpc_put_call(call, rxrpc_call_put_kernel);
                }
                rxrpc_call_completed(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
                tail = (tail + 1) & (size - 1);
        }

        kfree(b);
}

/*
 * Allocate a new incoming call from the prealloc pool, along with a connection
 * and a peer as necessary.
 */
static struct rxrpc_call *rxrpc_alloc_incoming_call(struct rxrpc_sock *rx,
                                                    struct rxrpc_local *local,
                                                    struct rxrpc_connection *conn,
                                                    struct sk_buff *skb)
{
        struct rxrpc_backlog *b = rx->backlog;
        struct rxrpc_peer *peer, *xpeer;
        struct rxrpc_call *call;
        unsigned short call_head, conn_head, peer_head;
        unsigned short call_tail, conn_tail, peer_tail;
        unsigned short call_count, conn_count;

        /* #calls >= #conns >= #peers must hold true.  The preallocator only
         * tops up conns and peers when there aren't already more of them than
         * calls, and this consumer always takes a call but only takes a conn
         * or peer when it actually needs one, so both sides preserve the
         * invariant.
         */
        call_head = smp_load_acquire(&b->call_backlog_head);
        call_tail = b->call_backlog_tail;
        call_count = CIRC_CNT(call_head, call_tail, RXRPC_BACKLOG_MAX);
        conn_head = smp_load_acquire(&b->conn_backlog_head);
        conn_tail = b->conn_backlog_tail;
        conn_count = CIRC_CNT(conn_head, conn_tail, RXRPC_BACKLOG_MAX);
        ASSERTCMP(conn_count, >=, call_count);
        peer_head = smp_load_acquire(&b->peer_backlog_head);
        peer_tail = b->peer_backlog_tail;
        ASSERTCMP(CIRC_CNT(peer_head, peer_tail, RXRPC_BACKLOG_MAX), >=,
                  conn_count);

        if (call_count == 0)
                return NULL;

        if (!conn) {
                /* No connection.  We're going to need a peer to start off
                 * with.  If one doesn't yet exist, use a spare from the
                 * preallocation set.  We dump the address into the spare in
                 * anticipation - and to save on stack space.
                 */
                xpeer = b->peer_backlog[peer_tail];
                if (rxrpc_extract_addr_from_skb(&xpeer->srx, skb) < 0)
                        return NULL;

                peer = rxrpc_lookup_incoming_peer(local, xpeer);
                if (peer == xpeer) {
                        b->peer_backlog[peer_tail] = NULL;
                        smp_store_release(&b->peer_backlog_tail,
                                          (peer_tail + 1) &
                                          (RXRPC_BACKLOG_MAX - 1));
                }
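                /* If the lookup found an existing peer instead, the spare
                 * stays in the ring, still holding the scribbled-on address,
                 * and will be reused for a later call.
                 */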

                /* Now allocate and set up the connection */
                conn = b->conn_backlog[conn_tail];
                b->conn_backlog[conn_tail] = NULL;
                smp_store_release(&b->conn_backlog_tail,
                                  (conn_tail + 1) & (RXRPC_BACKLOG_MAX - 1));
                rxrpc_get_local(local);
                conn->params.local = local;
                conn->params.peer = peer;
                rxrpc_see_connection(conn);
                rxrpc_new_incoming_connection(conn, skb);
        } else {
                rxrpc_get_connection(conn);
        }

        /* And now we can allocate and set up a new call */
        call = b->call_backlog[call_tail];
        b->call_backlog[call_tail] = NULL;
        smp_store_release(&b->call_backlog_tail,
                          (call_tail + 1) & (RXRPC_BACKLOG_MAX - 1));

        rxrpc_see_call(call);
        call->conn = conn;
        call->peer = rxrpc_get_peer(conn->params.peer);
        return call;
}

/*
 * Set up a new incoming call.  Called in BH context with the RCU read lock
 * held.
 *
 * If this is for a kernel service, when we allocate the call, it will have
 * three refs on it: (1) the kernel service, (2) the user_call_ID tree, (3) the
 * retainer ref obtained from the backlog buffer.  Prealloc calls for userspace
 * services only have the ref from the backlog buffer.  We want to pass this
 * ref to non-BH context to dispose of.
 *
 * If we want to report an error, we mark the skb with the packet type and
 * abort code and return NULL.
 */
struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local,
                                           struct rxrpc_connection *conn,
                                           struct sk_buff *skb)
{
        struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
        struct rxrpc_sock *rx;
        struct rxrpc_call *call;
        u16 service_id = sp->hdr.serviceId;

        _enter("");

        /* Get the socket providing the service */
        rx = rcu_dereference(local->service);
        if (rx && service_id == rx->srx.srx_service)
                goto found_service;

        trace_rxrpc_abort("INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
                          RX_INVALID_OPERATION, EOPNOTSUPP);
        skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
        skb->priority = RX_INVALID_OPERATION;
        _leave(" = NULL [service]");
        return NULL;

found_service:
        spin_lock(&rx->incoming_lock);
        if (rx->sk.sk_state == RXRPC_CLOSE) {
                trace_rxrpc_abort("CLS", sp->hdr.cid, sp->hdr.callNumber,
                                  sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN);
                skb->mark = RXRPC_SKB_MARK_LOCAL_ABORT;
                skb->priority = RX_INVALID_OPERATION;
                _leave(" = NULL [close]");
                call = NULL;
                goto out;
        }

        call = rxrpc_alloc_incoming_call(rx, local, conn, skb);
        if (!call) {
                skb->mark = RXRPC_SKB_MARK_BUSY;
                _leave(" = NULL [busy]");
                call = NULL;
                goto out;
        }

        trace_rxrpc_receive(call, rxrpc_receive_incoming,
                            sp->hdr.serial, sp->hdr.seq);

        /* Make the call live. */
        rxrpc_incoming_call(rx, call, skb);
        conn = call->conn;

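        /* A kernel service gets poked directly through its notification hook;
         * a userspace service sees the call on the socket's accept queue and
         * takes it over via rxrpc_accept_call() below.
         */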
        if (rx->notify_new_call)
                rx->notify_new_call(&rx->sk, call, call->user_call_ID);
        else
                sk_acceptq_added(&rx->sk);

        spin_lock(&conn->state_lock);
        switch (conn->state) {
        case RXRPC_CONN_SERVICE_UNSECURED:
                conn->state = RXRPC_CONN_SERVICE_CHALLENGING;
                set_bit(RXRPC_CONN_EV_CHALLENGE, &call->conn->events);
                rxrpc_queue_conn(call->conn);
                break;

        case RXRPC_CONN_SERVICE:
                write_lock(&call->state_lock);
                if (rx->discard_new_call)
                        call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                else
                        call->state = RXRPC_CALL_SERVER_ACCEPTING;
                write_unlock(&call->state_lock);
                break;

        case RXRPC_CONN_REMOTELY_ABORTED:
                rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
                                          conn->remote_abort, ECONNABORTED);
                break;
        case RXRPC_CONN_LOCALLY_ABORTED:
                rxrpc_abort_call("CON", call, sp->hdr.seq,
                                 conn->local_abort, ECONNABORTED);
                break;
        default:
                BUG();
        }
        spin_unlock(&conn->state_lock);

        if (call->state == RXRPC_CALL_SERVER_ACCEPTING)
                rxrpc_notify_socket(call);

        /* We have to discard the prealloc queue's ref here and rely on a
         * combination of the RCU read lock and refs held either by the socket
         * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel
         * service to prevent the call from being deallocated too early.
         */
        rxrpc_put_call(call, rxrpc_call_put);

        _leave(" = %p{%d}", call, call->debug_id);
out:
        spin_unlock(&rx->incoming_lock);
        return call;
}

/*
 * handle acceptance of a call by userspace
 * - assign the user call ID to the call at the front of the queue
 */
struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
                                     unsigned long user_call_ID,
                                     rxrpc_notify_rx_t notify_rx)
{
        struct rxrpc_call *call;
        struct rb_node *parent, **pp;
        int ret;

        _enter(",%lx", user_call_ID);

        ASSERT(!irqs_disabled());

        write_lock(&rx->call_lock);

        if (list_empty(&rx->to_be_accepted)) {
                write_unlock(&rx->call_lock);
                kleave(" = -ENODATA [empty]");
                return ERR_PTR(-ENODATA);
        }

        /* check the user ID isn't already in use */
        pp = &rx->calls.rb_node;
        parent = NULL;
        while (*pp) {
                parent = *pp;
                call = rb_entry(parent, struct rxrpc_call, sock_node);

                if (user_call_ID < call->user_call_ID)
                        pp = &(*pp)->rb_left;
                else if (user_call_ID > call->user_call_ID)
                        pp = &(*pp)->rb_right;
                else
                        goto id_in_use;
        }

        /* Dequeue the first call and check it's still valid.  We gain
         * responsibility for the queue's reference.
         */
        call = list_entry(rx->to_be_accepted.next,
                          struct rxrpc_call, accept_link);
        list_del_init(&call->accept_link);
        sk_acceptq_removed(&rx->sk);
        rxrpc_see_call(call);

        write_lock_bh(&call->state_lock);
        switch (call->state) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                call->state = RXRPC_CALL_SERVER_RECV_REQUEST;
                break;
        case RXRPC_CALL_COMPLETE:
                ret = call->error;
                goto out_release;
        default:
                BUG();
        }

        /* formalise the acceptance */
        call->notify_rx = notify_rx;
        call->user_call_ID = user_call_ID;
        rxrpc_get_call(call, rxrpc_call_got_userid);
        rb_link_node(&call->sock_node, parent, pp);
        rb_insert_color(&call->sock_node, &rx->calls);
        if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
                BUG();

        write_unlock_bh(&call->state_lock);
        write_unlock(&rx->call_lock);
        rxrpc_notify_socket(call);
        rxrpc_service_prealloc(rx, GFP_KERNEL);
        _leave(" = %p{%d}", call, call->debug_id);
        return call;

out_release:
        _debug("release %p", call);
        write_unlock_bh(&call->state_lock);
        write_unlock(&rx->call_lock);
        rxrpc_release_call(rx, call);
        rxrpc_put_call(call, rxrpc_call_put);
        goto out;

id_in_use:
        ret = -EBADSLT;
        write_unlock(&rx->call_lock);
out:
        rxrpc_service_prealloc(rx, GFP_KERNEL);
        _leave(" = %d", ret);
        return ERR_PTR(ret);
}

/*
 * Handle rejection of a call by userspace
 * - reject the call at the front of the queue
 */
int rxrpc_reject_call(struct rxrpc_sock *rx)
{
        struct rxrpc_call *call;
        bool abort = false;
        int ret;

        _enter("");

        ASSERT(!irqs_disabled());

        write_lock(&rx->call_lock);

        if (list_empty(&rx->to_be_accepted)) {
                write_unlock(&rx->call_lock);
                return -ENODATA;
        }

        /* Dequeue the first call and check it's still valid.  We gain
         * responsibility for the queue's reference.
         */
        call = list_entry(rx->to_be_accepted.next,
                          struct rxrpc_call, accept_link);
        list_del_init(&call->accept_link);
        sk_acceptq_removed(&rx->sk);
        rxrpc_see_call(call);

        write_lock_bh(&call->state_lock);
        switch (call->state) {
        case RXRPC_CALL_SERVER_ACCEPTING:
                __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, ECONNABORTED);
                abort = true;
                /* fall through */
        case RXRPC_CALL_COMPLETE:
                ret = call->error;
                goto out_discard;
        default:
                BUG();
        }

out_discard:
        write_unlock_bh(&call->state_lock);
        write_unlock(&rx->call_lock);
        if (abort) {
                rxrpc_send_abort_packet(call);
                rxrpc_release_call(rx, call);
                rxrpc_put_call(call, rxrpc_call_put);
        }
        rxrpc_service_prealloc(rx, GFP_KERNEL);
        _leave(" = %d", ret);
        return ret;
}

/**
 * rxrpc_kernel_charge_accept - Charge up socket with preallocated calls
 * @sock: The socket on which to preallocate
 * @notify_rx: Event notification function for the call
 * @user_attach_call: Func to attach call to user_call_ID
 * @user_call_ID: The tag to attach to the preallocated call
 * @gfp: The allocation conditions.
 *
 * Charge up the socket with preallocated calls, each with a user ID.  A
 * function should be provided to effect the attachment from the user's side.
 * The user is given a ref to hold on the call.
 *
 * Note that the call may become connected before this function returns.
 */
int rxrpc_kernel_charge_accept(struct socket *sock,
                               rxrpc_notify_rx_t notify_rx,
                               rxrpc_user_attach_call_t user_attach_call,
                               unsigned long user_call_ID, gfp_t gfp)
{
        struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
        struct rxrpc_backlog *b = rx->backlog;

        if (sock->sk->sk_state == RXRPC_CLOSE)
                return -ESHUTDOWN;

        return rxrpc_service_prealloc_one(rx, b, notify_rx,
                                          user_attach_call, user_call_ID,
                                          gfp);
}
EXPORT_SYMBOL(rxrpc_kernel_charge_accept);
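
/* A minimal usage sketch, illustrative only: how a kernel service might keep
 * the backlog charged.  The names my_call, my_attach_call and my_notify_rx
 * are hypothetical placeholders and not part of the rxrpc API; the only real
 * entry point used is rxrpc_kernel_charge_accept() above.
 *
 *	static void my_attach_call(struct rxrpc_call *rxcall,
 *				   unsigned long user_call_ID)
 *	{
 *		struct my_call *c = (struct my_call *)user_call_ID;
 *
 *		// The user ID is the service's own object; stash the rxrpc
 *		// call in it so the two can find each other later.
 *		c->rxcall = rxcall;
 *	}
 *
 *	// Charge one preallocated incoming call, tagged with the object's
 *	// address; -ENOBUFS just means the backlog ring is already full.
 *	ret = rxrpc_kernel_charge_accept(srv_socket, my_notify_rx,
 *					 my_attach_call, (unsigned long)c,
 *					 GFP_KERNEL);
 */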