/* RxRPC remote transport endpoint management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include "ar-internal.h"

static LIST_HEAD(rxrpc_peers);
static DEFINE_RWLOCK(rxrpc_peer_lock);
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);

static void rxrpc_destroy_peer(struct work_struct *work);

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
{
        struct rtable *rt;
        struct flowi fl;
        int ret;

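        /* Start from the standard Ethernet MTU; this is what we're left with
         * if the route lookup below can't tell us anything better.
         */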
        peer->if_mtu = 1500;

        memset(&fl, 0, sizeof(fl));

        switch (peer->srx.transport.family) {
        case AF_INET:
                fl.oif = 0;
                fl.proto = IPPROTO_UDP;
                fl.nl_u.ip4_u.saddr = 0;
                fl.nl_u.ip4_u.daddr = peer->srx.transport.sin.sin_addr.s_addr;
                fl.nl_u.ip4_u.tos = 0;
                /* assume AFS.CM talking to AFS.FS */
                fl.uli_u.ports.sport = htons(7001);
                fl.uli_u.ports.dport = htons(7000);
                break;
        default:
                BUG();
        }

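        /* Ask the routing layer for a route to the peer.  Only the MTU
         * recorded against whatever route comes back is used; the AFS port
         * numbers above merely give the flow key something plausible to
         * match on.
         */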
        ret = ip_route_output_key(&rt, &fl);
        if (ret < 0) {
                _leave(" [route err %d]", ret);
                return;
        }

        peer->if_mtu = dst_mtu(&rt->u.dst);
        dst_release(&rt->u.dst);

        _leave(" [if_mtu %u]", peer->if_mtu);
}

/*
 * allocate a new peer
 */
static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
                                           gfp_t gfp)
{
        struct rxrpc_peer *peer;

        _enter("");

        peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
        if (peer) {
                INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
                INIT_LIST_HEAD(&peer->link);
                INIT_LIST_HEAD(&peer->error_targets);
                spin_lock_init(&peer->lock);
                atomic_set(&peer->usage, 1);
                peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
                memcpy(&peer->srx, srx, sizeof(*srx));

                rxrpc_assess_MTU_size(peer);
                peer->mtu = peer->if_mtu;

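                /* Work out how much of the MTU is left for RxRPC payload data
                 * once the transport headers (IP plus UDP) and the RxRPC
                 * packet header have been accounted for.
                 */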
                if (srx->transport.family == AF_INET) {
                        peer->hdrsize = sizeof(struct iphdr);
                        switch (srx->transport_type) {
                        case SOCK_DGRAM:
                                peer->hdrsize += sizeof(struct udphdr);
                                break;
                        default:
                                BUG();
                                break;
                        }
                } else {
                        BUG();
                }

                peer->hdrsize += sizeof(struct rxrpc_header);
                peer->maxdata = peer->mtu - peer->hdrsize;
        }

        _leave(" = %p", peer);
        return peer;
}

/*
 * obtain a remote transport endpoint for the specified address
 */
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
{
        struct rxrpc_peer *peer, *candidate;
        const char *new = "old";
        int usage;

        _enter("{%d,%d,%u.%u.%u.%u+%hu}",
               srx->transport_type,
               srx->transport_len,
               NIPQUAD(srx->transport.sin.sin_addr),
               ntohs(srx->transport.sin.sin_port));

        /* search the peer list first */
        read_lock_bh(&rxrpc_peer_lock);
        list_for_each_entry(peer, &rxrpc_peers, link) {
                _debug("check PEER %d { u=%d t=%d l=%d }",
                       peer->debug_id,
                       atomic_read(&peer->usage),
                       peer->srx.transport_type,
                       peer->srx.transport_len);

                if (atomic_read(&peer->usage) > 0 &&
                    peer->srx.transport_type == srx->transport_type &&
                    peer->srx.transport_len == srx->transport_len &&
                    memcmp(&peer->srx.transport,
                           &srx->transport,
                           srx->transport_len) == 0)
                        goto found_extant_peer;
        }
        read_unlock_bh(&rxrpc_peer_lock);

        /* not yet present - create a candidate for a new record and then
         * redo the search */
        candidate = rxrpc_alloc_peer(srx, gfp);
        if (!candidate) {
                _leave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }

        write_lock_bh(&rxrpc_peer_lock);

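        /* Repeat the search now that the write lock is held: another thread
         * may have added the same peer while the lock was dropped.  If so,
         * the spare candidate is simply freed again below.
         */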
        list_for_each_entry(peer, &rxrpc_peers, link) {
                if (atomic_read(&peer->usage) > 0 &&
                    peer->srx.transport_type == srx->transport_type &&
                    peer->srx.transport_len == srx->transport_len &&
                    memcmp(&peer->srx.transport,
                           &srx->transport,
                           srx->transport_len) == 0)
                        goto found_extant_second;
        }

        /* we can now add the new candidate to the list */
        peer = candidate;
        candidate = NULL;

        list_add_tail(&peer->link, &rxrpc_peers);
        write_unlock_bh(&rxrpc_peer_lock);
        new = "new";

success:
        _net("PEER %s %d {%d,%u,%u.%u.%u.%u+%hu}",
             new,
             peer->debug_id,
             peer->srx.transport_type,
             peer->srx.transport.family,
             NIPQUAD(peer->srx.transport.sin.sin_addr),
             ntohs(peer->srx.transport.sin.sin_port));

        _leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
        return peer;

        /* we found the peer in the list immediately */
found_extant_peer:
        usage = atomic_inc_return(&peer->usage);
        read_unlock_bh(&rxrpc_peer_lock);
        goto success;

        /* we found the peer on the second time through the list */
found_extant_second:
        usage = atomic_inc_return(&peer->usage);
        write_unlock_bh(&rxrpc_peer_lock);
        kfree(candidate);
        goto success;
}

/*
 * find the peer associated with a packet
 */
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
                                   __be32 addr, __be16 port)
{
        struct rxrpc_peer *peer;

        _enter("");

        /* search the peer list */
        read_lock_bh(&rxrpc_peer_lock);

        if (local->srx.transport.family == AF_INET &&
            local->srx.transport_type == SOCK_DGRAM
            ) {
                list_for_each_entry(peer, &rxrpc_peers, link) {
                        if (atomic_read(&peer->usage) > 0 &&
                            peer->srx.transport_type == SOCK_DGRAM &&
                            peer->srx.transport.family == AF_INET &&
                            peer->srx.transport.sin.sin_port == port &&
                            peer->srx.transport.sin.sin_addr.s_addr == addr)
                                goto found_UDP_peer;
                }

                goto new_UDP_peer;
        }

        read_unlock_bh(&rxrpc_peer_lock);
        _leave(" = -EAFNOSUPPORT");
        return ERR_PTR(-EAFNOSUPPORT);

found_UDP_peer:
        _net("Rx UDP DGRAM from peer %d", peer->debug_id);
        atomic_inc(&peer->usage);
        read_unlock_bh(&rxrpc_peer_lock);
        _leave(" = %p", peer);
        return peer;

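        /* Datagram from an as-yet unknown peer: this lookup path doesn't
         * create peer records, so just report back to the caller.
         */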
new_UDP_peer:
        _net("Rx UDP DGRAM from NEW peer");
        read_unlock_bh(&rxrpc_peer_lock);
        _leave(" = -EBUSY [new]");
        return ERR_PTR(-EBUSY);
}

/*
 * release a remote transport endpoint
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
        _enter("%p{u=%d}", peer, atomic_read(&peer->usage));

        ASSERTCMP(atomic_read(&peer->usage), >, 0);

        if (likely(!atomic_dec_and_test(&peer->usage))) {
                _leave(" [in use]");
                return;
        }

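        /* That was the last reference: hand the record to the destroyer work
         * item, which unlinks it from the peer list and frees it.
         */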
        rxrpc_queue_work(&peer->destroyer);
        _leave("");
}

/*
 * destroy a remote transport endpoint
 */
static void rxrpc_destroy_peer(struct work_struct *work)
{
        struct rxrpc_peer *peer =
                container_of(work, struct rxrpc_peer, destroyer);

        _enter("%p{%d}", peer, atomic_read(&peer->usage));

        write_lock_bh(&rxrpc_peer_lock);
        list_del(&peer->link);
        write_unlock_bh(&rxrpc_peer_lock);

        _net("DESTROY PEER %d", peer->debug_id);
        kfree(peer);

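        /* If that was the last peer on the list, wake anyone waiting in
         * rxrpc_destroy_all_peers() for the list to drain.
         */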
        if (list_empty(&rxrpc_peers))
                wake_up_all(&rxrpc_peer_wq);
        _leave("");
}

/*
 * preemptively destroy all the peer records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_peers(void)
{
        DECLARE_WAITQUEUE(myself, current);

        _enter("");

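        /* The records are freed asynchronously by rxrpc_destroy_peer(), which
         * wakes rxrpc_peer_wq once the peer list has emptied.
         */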
        /* we simply have to wait for them to go away */
        if (!list_empty(&rxrpc_peers)) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&rxrpc_peer_wq, &myself);

                while (!list_empty(&rxrpc_peers)) {
                        schedule();
                        set_current_state(TASK_UNINTERRUPTIBLE);
                }

                remove_wait_queue(&rxrpc_peer_wq, &myself);
                set_current_state(TASK_RUNNING);
        }

        _leave("");
}