/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket? and vice
	 * versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

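	/* If no MTU was supplied, make a conservative estimate: halve an
	 * oversized interface MTU but not below the 1500-byte Ethernet
	 * default, otherwise back off by 100 bytes, keeping at least room
	 * for the rxrpc headers.
	 */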
	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	_enter("%p{%d}", sk, local->debug_id);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
	serr = SKB_EXT_ERR(skb);
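	/* TX timestamping can queue zero-length skbs on the error queue;
	 * they carry no error report, so just discard them.
	 */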
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		return;
	}

	rcu_read_lock();
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

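	/* Fragmentation-needed reports only adjust the path MTU; they are
	 * not propagated to the peer's calls as errors.
	 */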
	if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	     serr->ee.ee_type == ICMP_DEST_UNREACH &&
	     serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
	rxrpc_put_peer(peer);

	_leave("");
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

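	/* Fail every call on this peer's error list that hasn't already
	 * completed; the errno is negated to the kernel's -errno convention.
	 */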
	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    rxrpc_set_call_completion(call, compl, 0, -error))
			rxrpc_notify_socket(call);
	}
}

/*
 * Add RTT information to cache. This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt;
	u64 sum = peer->rtt_sum, avg;
	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;

	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
	if (rtt < 0)
		return;

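	/* The cache is a ring of the last RXRPC_RTT_CACHE_SIZE samples (a
	 * power of two, so the cursor can wrap with a simple mask) and a
	 * running sum is kept so the rolling average costs one division.
	 */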
	spin_lock(&peer->rtt_input_lock);

	/* Replace the oldest datum in the RTT buffer */
	sum -= peer->rtt_cache[cursor];
	sum += rtt;
	peer->rtt_cache[cursor] = rtt;
	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum = sum;
	if (usage < RXRPC_RTT_CACHE_SIZE) {
		usage++;
		peer->rtt_usage = usage;
	}

	spin_unlock(&peer->rtt_input_lock);

	/* Now recalculate the average */
	if (usage == RXRPC_RTT_CACHE_SIZE) {
		avg = sum / RXRPC_RTT_CACHE_SIZE;
	} else {
		avg = sum;
		do_div(avg, usage);
	}

	/* Don't need to update this under lock */
	peer->rtt = avg;
	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
			   usage, avg);
}

/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		spin_unlock_bh(&rxnet->peer_hash_lock);

		keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
		slot = keepalive_at - base;
		_debug("%02x peer %u t=%d {%pISp}",
		       cursor, peer->debug_id, slot, &peer->srx.transport);

		if (keepalive_at <= base ||
		    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
			rxrpc_send_keepalive(peer);
			slot = RXRPC_KEEPALIVE_TIME;
		}

		/* A transmission to this peer occurred since last we examined
		 * it so put it into the appropriate future bucket.
		 */
		slot += cursor;
		slot &= mask;
		spin_lock_bh(&rxnet->peer_hash_lock);
		list_add_tail(&peer->keepalive_link,
			      &rxnet->peer_keepalive[slot & mask]);
		rxrpc_put_peer(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Remove to a temporary list all the peers that are currently lodged
	 * in expired buckets plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

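	/* cursor and stop are u8, so the (s8) difference test still works
	 * when the cursor wraps; it bounds the sweep to one revolution of
	 * the bucket ring.
	 */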
	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}