Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * INET An implementation of the TCP/IP protocol suite for the LINUX | |
3 | * operating system. INET is implemented using the BSD Socket | |
4 | * interface as the means of communication with the user level. | |
5 | * | |
6 | * The User Datagram Protocol (UDP). | |
7 | * | |
8 | * Version: $Id: udp.c,v 1.102 2002/02/01 22:01:04 davem Exp $ | |
9 | * | |
02c30a84 | 10 | * Authors: Ross Biro |
1da177e4 LT |
11 | * Fred N. van Kempen, <[email protected]> |
12 | * Arnt Gulbrandsen, <[email protected]> | |
13 | * Alan Cox, <[email protected]> | |
14 | * Hirokazu Takahashi, <[email protected]> | |
15 | * | |
16 | * Fixes: | |
17 | * Alan Cox : verify_area() calls | |
18 | * Alan Cox : stopped close while in use off icmp | |
19 | * messages. Not a fix but a botch that | |
20 | * for udp at least is 'valid'. | |
21 | * Alan Cox : Fixed icmp handling properly | |
22 | * Alan Cox : Correct error for oversized datagrams | |
23 | * Alan Cox : Tidied select() semantics. | |
24 | * Alan Cox : udp_err() fixed properly, also now | |
25 | * select and read wake correctly on errors | |
26 | * Alan Cox : udp_send verify_area moved to avoid mem leak | |
27 | * Alan Cox : UDP can count its memory | |
28 | * Alan Cox : send to an unknown connection causes | |
29 | * an ECONNREFUSED off the icmp, but | |
30 | * does NOT close. | |
31 | * Alan Cox : Switched to new sk_buff handlers. No more backlog! | |
32 | * Alan Cox : Using generic datagram code. Even smaller and the PEEK | |
33 | * bug no longer crashes it. | |
34 | * Fred Van Kempen : Net2e support for sk->broadcast. | |
35 | * Alan Cox : Uses skb_free_datagram | |
36 | * Alan Cox : Added get/set sockopt support. | |
37 | * Alan Cox : Broadcasting without option set returns EACCES. | |
38 | * Alan Cox : No wakeup calls. Instead we now use the callbacks. | |
39 | * Alan Cox : Use ip_tos and ip_ttl | |
40 | * Alan Cox : SNMP Mibs | |
41 | * Alan Cox : MSG_DONTROUTE, and 0.0.0.0 support. | |
42 | * Matt Dillon : UDP length checks. | |
43 | * Alan Cox : Smarter af_inet used properly. | |
44 | * Alan Cox : Use new kernel side addressing. | |
45 | * Alan Cox : Incorrect return on truncated datagram receive. | |
46 | * Arnt Gulbrandsen : New udp_send and stuff | |
47 | * Alan Cox : Cache last socket | |
48 | * Alan Cox : Route cache | |
49 | * Jon Peatfield : Minor efficiency fix to sendto(). | |
50 | * Mike Shaver : RFC1122 checks. | |
51 | * Alan Cox : Nonblocking error fix. | |
52 | * Willy Konynenberg : Transparent proxying support. | |
53 | * Mike McLagan : Routing by source | |
54 | * David S. Miller : New socket lookup architecture. | |
55 | * Last socket cache retained as it | |
56 | * does have a high hit rate. | |
57 | * Olaf Kirch : Don't linearise iovec on sendmsg. | |
58 | * Andi Kleen : Some cleanups, cache destination entry | |
59 | * for connect. | |
60 | * Vitaly E. Lavrov : Transparent proxy revived after year coma. | |
61 | * Melvin Smith : Check msg_name not msg_namelen in sendto(), | |
62 | * return ENOTCONN for unconnected sockets (POSIX) | |
63 | * Janos Farkas : don't deliver multi/broadcasts to a different | |
64 | * bound-to-device socket | |
65 | * Hirokazu Takahashi : HW checksumming for outgoing UDP | |
66 | * datagrams. | |
67 | * Hirokazu Takahashi : sendfile() on UDP works now. | |
68 | * Arnaldo C. Melo : convert /proc/net/udp to seq_file | |
69 | * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which | |
70 | * Alexey Kuznetsov: allow both IPv4 and IPv6 sockets to bind | |
71 | * a single port at the same time. | |
72 | * Derek Atkins <[email protected]>: Add Encapsulation Support | |
73 | * | |
74 | * | |
75 | * This program is free software; you can redistribute it and/or | |
76 | * modify it under the terms of the GNU General Public License | |
77 | * as published by the Free Software Foundation; either version | |
78 | * 2 of the License, or (at your option) any later version. | |
79 | */ | |
80 | ||
81 | #include <asm/system.h> | |
82 | #include <asm/uaccess.h> | |
83 | #include <asm/ioctls.h> | |
84 | #include <linux/types.h> | |
85 | #include <linux/fcntl.h> | |
86 | #include <linux/module.h> | |
87 | #include <linux/socket.h> | |
88 | #include <linux/sockios.h> | |
14c85021 | 89 | #include <linux/igmp.h> |
1da177e4 LT |
90 | #include <linux/in.h> |
91 | #include <linux/errno.h> | |
92 | #include <linux/timer.h> | |
93 | #include <linux/mm.h> | |
1da177e4 LT |
94 | #include <linux/inet.h> |
95 | #include <linux/ipv6.h> | |
96 | #include <linux/netdevice.h> | |
97 | #include <net/snmp.h> | |
c752f073 ACM |
98 | #include <net/ip.h> |
99 | #include <net/tcp_states.h> | |
1da177e4 LT |
100 | #include <net/protocol.h> |
101 | #include <linux/skbuff.h> | |
102 | #include <linux/proc_fs.h> | |
103 | #include <linux/seq_file.h> | |
104 | #include <net/sock.h> | |
105 | #include <net/udp.h> | |
106 | #include <net/icmp.h> | |
107 | #include <net/route.h> | |
108 | #include <net/inet_common.h> | |
109 | #include <net/checksum.h> | |
110 | #include <net/xfrm.h> | |
111 | ||
112 | /* | |
113 | * Snmp MIB for the UDP layer | |
114 | */ | |
115 | ||
ba89966c | 116 | DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly; |
1da177e4 LT |
117 | |
118 | struct hlist_head udp_hash[UDP_HTABLE_SIZE]; | |
119 | DEFINE_RWLOCK(udp_hash_lock); | |
120 | ||
bed53ea7 | 121 | static int udp_port_rover; |
1da177e4 | 122 | |
25030a7f | 123 | static inline int udp_lport_inuse(u16 num) |
1da177e4 | 124 | { |
25030a7f | 125 | struct sock *sk; |
1da177e4 | 126 | struct hlist_node *node; |
25030a7f GR |
127 | |
128 | sk_for_each(sk, node, &udp_hash[num & (UDP_HTABLE_SIZE - 1)]) | |
129 | if (inet_sk(sk)->num == num) | |
130 | return 1; | |
131 | return 0; | |
132 | } | |
133 | ||
134 | /** | |
135 | * udp_get_port - common port lookup for IPv4 and IPv6 | |
136 | * | |
137 | * @sk: socket struct in question | |
138 | * @snum: port number to look up | |
139 | * @saddr_cmp: AF-dependent comparison of bound local IP addresses | |
140 | */ | |
141 | int udp_get_port(struct sock *sk, unsigned short snum, | |
e3b4eadb | 142 | int (*saddr_cmp)(const struct sock *sk1, const struct sock *sk2)) |
25030a7f GR |
143 | { |
144 | struct hlist_node *node; | |
145 | struct hlist_head *head; | |
1da177e4 | 146 | struct sock *sk2; |
25030a7f | 147 | int error = 1; |
1da177e4 LT |
148 | |
149 | write_lock_bh(&udp_hash_lock); | |
150 | if (snum == 0) { | |
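	/*
	 * Automatic port selection: starting at udp_port_rover, one
	 * candidate port per hash chain is probed.  An empty chain is
	 * taken immediately; otherwise the least-populated chain is
	 * remembered and then walked in steps of UDP_HTABLE_SIZE until
	 * a port that is not already in use is found.
	 */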
151 | int best_size_so_far, best, result, i; | |
152 | ||
153 | if (udp_port_rover > sysctl_local_port_range[1] || | |
154 | udp_port_rover < sysctl_local_port_range[0]) | |
155 | udp_port_rover = sysctl_local_port_range[0]; | |
156 | best_size_so_far = 32767; | |
157 | best = result = udp_port_rover; | |
158 | for (i = 0; i < UDP_HTABLE_SIZE; i++, result++) { | |
1da177e4 LT |
159 | int size; |
160 | ||
25030a7f GR |
161 | head = &udp_hash[result & (UDP_HTABLE_SIZE - 1)]; |
162 | if (hlist_empty(head)) { | |
1da177e4 LT |
163 | if (result > sysctl_local_port_range[1]) |
164 | result = sysctl_local_port_range[0] + | |
165 | ((result - sysctl_local_port_range[0]) & | |
166 | (UDP_HTABLE_SIZE - 1)); | |
167 | goto gotit; | |
168 | } | |
169 | size = 0; | |
25030a7f GR |
170 | sk_for_each(sk2, node, head) |
171 | if (++size < best_size_so_far) { | |
172 | best_size_so_far = size; | |
173 | best = result; | |
174 | } | |
1da177e4 LT |
175 | } |
176 | result = best; | |
177 | for(i = 0; i < (1 << 16) / UDP_HTABLE_SIZE; i++, result += UDP_HTABLE_SIZE) { | |
178 | if (result > sysctl_local_port_range[1]) | |
179 | result = sysctl_local_port_range[0] | |
180 | + ((result - sysctl_local_port_range[0]) & | |
181 | (UDP_HTABLE_SIZE - 1)); | |
182 | if (!udp_lport_inuse(result)) | |
183 | break; | |
184 | } | |
185 | if (i >= (1 << 16) / UDP_HTABLE_SIZE) | |
186 | goto fail; | |
187 | gotit: | |
188 | udp_port_rover = snum = result; | |
189 | } else { | |
25030a7f GR |
190 | head = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)]; |
191 | ||
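		/*
		 * Explicit bind: a conflict exists only if another socket
		 * already holds this port, SO_REUSEADDR is not set on both
		 * sockets, the device bindings overlap (either side unbound
		 * or the same ifindex), and the address comparison callback
		 * reports that the bound local addresses clash (for IPv4:
		 * either side is a wildcard or both are equal, unless the
		 * other socket is IPv6-only).
		 */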
192 | sk_for_each(sk2, node, head) | |
193 | if (inet_sk(sk2)->num == snum && | |
194 | sk2 != sk && | |
195 | (!sk2->sk_reuse || !sk->sk_reuse) && | |
196 | (!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if | |
197 | || sk2->sk_bound_dev_if == sk->sk_bound_dev_if) && | |
198 | (*saddr_cmp)(sk, sk2) ) | |
1da177e4 | 199 | goto fail; |
1da177e4 | 200 | } |
25030a7f | 201 | inet_sk(sk)->num = snum; |
1da177e4 | 202 | if (sk_unhashed(sk)) { |
25030a7f GR |
203 | head = &udp_hash[snum & (UDP_HTABLE_SIZE - 1)]; |
204 | sk_add_node(sk, head); | |
1da177e4 LT |
205 | sock_prot_inc_use(sk->sk_prot); |
206 | } | |
25030a7f | 207 | error = 0; |
1da177e4 LT |
208 | fail: |
209 | write_unlock_bh(&udp_hash_lock); | |
25030a7f GR |
210 | return error; |
211 | } | |
212 | ||
e3b4eadb | 213 | static inline int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2) |
25030a7f GR |
214 | { |
215 | struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2); | |
216 | ||
217 | return ( !ipv6_only_sock(sk2) && | |
218 | (!inet1->rcv_saddr || !inet2->rcv_saddr || | |
219 | inet1->rcv_saddr == inet2->rcv_saddr )); | |
220 | } | |
221 | ||
222 | static inline int udp_v4_get_port(struct sock *sk, unsigned short snum) | |
223 | { | |
224 | return udp_get_port(sk, snum, ipv4_rcv_saddr_equal); | |
1da177e4 LT |
225 | } |
226 | ||
25030a7f | 227 | |
1da177e4 LT |
228 | static void udp_v4_hash(struct sock *sk) |
229 | { | |
230 | BUG(); | |
231 | } | |
232 | ||
233 | static void udp_v4_unhash(struct sock *sk) | |
234 | { | |
235 | write_lock_bh(&udp_hash_lock); | |
236 | if (sk_del_node_init(sk)) { | |
237 | inet_sk(sk)->num = 0; | |
238 | sock_prot_dec_use(sk->sk_prot); | |
239 | } | |
240 | write_unlock_bh(&udp_hash_lock); | |
241 | } | |
242 | ||
243 | /* UDP is nearly always wildcards out the wazoo, it makes no sense to try | |
244 | * harder than this. -DaveM | |
245 | */ | |
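/*
 * Scoring used below: a socket on the right hash chain with a matching
 * local port starts at 1 if it is a plain PF_INET socket (0 for an IPv6
 * socket bound to a mapped address) and earns 2 more for each of local
 * address, remote address, remote port and bound device that is set on
 * the socket and matches the packet exactly; a set field that does not
 * match disqualifies the socket.  A score of 9 is a perfect match and
 * ends the scan early; otherwise the highest-scoring socket wins.
 */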
734ab87f AV |
246 | static struct sock *udp_v4_lookup_longway(__be32 saddr, __be16 sport, |
247 | __be32 daddr, __be16 dport, int dif) | |
1da177e4 LT |
248 | { |
249 | struct sock *sk, *result = NULL; | |
250 | struct hlist_node *node; | |
251 | unsigned short hnum = ntohs(dport); | |
252 | int badness = -1; | |
253 | ||
254 | sk_for_each(sk, node, &udp_hash[hnum & (UDP_HTABLE_SIZE - 1)]) { | |
255 | struct inet_sock *inet = inet_sk(sk); | |
256 | ||
257 | if (inet->num == hnum && !ipv6_only_sock(sk)) { | |
258 | int score = (sk->sk_family == PF_INET ? 1 : 0); | |
259 | if (inet->rcv_saddr) { | |
260 | if (inet->rcv_saddr != daddr) | |
261 | continue; | |
262 | score+=2; | |
263 | } | |
264 | if (inet->daddr) { | |
265 | if (inet->daddr != saddr) | |
266 | continue; | |
267 | score+=2; | |
268 | } | |
269 | if (inet->dport) { | |
270 | if (inet->dport != sport) | |
271 | continue; | |
272 | score+=2; | |
273 | } | |
274 | if (sk->sk_bound_dev_if) { | |
275 | if (sk->sk_bound_dev_if != dif) | |
276 | continue; | |
277 | score+=2; | |
278 | } | |
279 | if(score == 9) { | |
280 | result = sk; | |
281 | break; | |
282 | } else if(score > badness) { | |
283 | result = sk; | |
284 | badness = score; | |
285 | } | |
286 | } | |
287 | } | |
288 | return result; | |
289 | } | |
290 | ||
734ab87f AV |
291 | static __inline__ struct sock *udp_v4_lookup(__be32 saddr, __be16 sport, |
292 | __be32 daddr, __be16 dport, int dif) | |
1da177e4 LT |
293 | { |
294 | struct sock *sk; | |
295 | ||
296 | read_lock(&udp_hash_lock); | |
297 | sk = udp_v4_lookup_longway(saddr, sport, daddr, dport, dif); | |
298 | if (sk) | |
299 | sock_hold(sk); | |
300 | read_unlock(&udp_hash_lock); | |
301 | return sk; | |
302 | } | |
303 | ||
304 | static inline struct sock *udp_v4_mcast_next(struct sock *sk, | |
734ab87f AV |
305 | __be16 loc_port, __be32 loc_addr, |
306 | __be16 rmt_port, __be32 rmt_addr, | |
1da177e4 LT |
307 | int dif) |
308 | { | |
309 | struct hlist_node *node; | |
310 | struct sock *s = sk; | |
311 | unsigned short hnum = ntohs(loc_port); | |
312 | ||
313 | sk_for_each_from(s, node) { | |
314 | struct inet_sock *inet = inet_sk(s); | |
315 | ||
316 | if (inet->num != hnum || | |
317 | (inet->daddr && inet->daddr != rmt_addr) || | |
318 | (inet->dport != rmt_port && inet->dport) || | |
319 | (inet->rcv_saddr && inet->rcv_saddr != loc_addr) || | |
320 | ipv6_only_sock(s) || | |
321 | (s->sk_bound_dev_if && s->sk_bound_dev_if != dif)) | |
322 | continue; | |
323 | if (!ip_mc_sf_allow(s, loc_addr, rmt_addr, dif)) | |
324 | continue; | |
325 | goto found; | |
326 | } | |
327 | s = NULL; | |
328 | found: | |
329 | return s; | |
330 | } | |
331 | ||
332 | /* | |
333 | * This routine is called by the ICMP module when it gets some | |
334 | * sort of error condition. If err < 0 then the socket should | |
335 | * be closed and the error returned to the user. If err > 0 | |
336 | * it's just the icmp type << 8 | icmp code. | |
337 | * Header points to the ip header of the error packet. We move | |
338 | * on past this. Then (as it used to claim before adjustment) | |
339 | * header points to the first 8 bytes of the udp header. We need | |
340 | * to find the appropriate port. | |
341 | */ | |
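/*
 * The interesting conversion happens via icmp_err_convert[] below: for
 * example an ICMP_DEST_UNREACH with code ICMP_PORT_UNREACH from the peer
 * becomes a hard ECONNREFUSED.  The error is only surfaced to the
 * application if IP_RECVERR is enabled on the socket, or if the error is
 * hard and the socket is connected (RFC 1122, 4.1.3.3).
 */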
342 | ||
343 | void udp_err(struct sk_buff *skb, u32 info) | |
344 | { | |
345 | struct inet_sock *inet; | |
346 | struct iphdr *iph = (struct iphdr*)skb->data; | |
347 | struct udphdr *uh = (struct udphdr*)(skb->data+(iph->ihl<<2)); | |
348 | int type = skb->h.icmph->type; | |
349 | int code = skb->h.icmph->code; | |
350 | struct sock *sk; | |
351 | int harderr; | |
352 | int err; | |
353 | ||
354 | sk = udp_v4_lookup(iph->daddr, uh->dest, iph->saddr, uh->source, skb->dev->ifindex); | |
355 | if (sk == NULL) { | |
356 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | |
357 | return; /* No socket for error */ | |
358 | } | |
359 | ||
360 | err = 0; | |
361 | harderr = 0; | |
362 | inet = inet_sk(sk); | |
363 | ||
364 | switch (type) { | |
365 | default: | |
366 | case ICMP_TIME_EXCEEDED: | |
367 | err = EHOSTUNREACH; | |
368 | break; | |
369 | case ICMP_SOURCE_QUENCH: | |
370 | goto out; | |
371 | case ICMP_PARAMETERPROB: | |
372 | err = EPROTO; | |
373 | harderr = 1; | |
374 | break; | |
375 | case ICMP_DEST_UNREACH: | |
376 | if (code == ICMP_FRAG_NEEDED) { /* Path MTU discovery */ | |
377 | if (inet->pmtudisc != IP_PMTUDISC_DONT) { | |
378 | err = EMSGSIZE; | |
379 | harderr = 1; | |
380 | break; | |
381 | } | |
382 | goto out; | |
383 | } | |
384 | err = EHOSTUNREACH; | |
385 | if (code <= NR_ICMP_UNREACH) { | |
386 | harderr = icmp_err_convert[code].fatal; | |
387 | err = icmp_err_convert[code].errno; | |
388 | } | |
389 | break; | |
390 | } | |
391 | ||
392 | /* | |
393 | * RFC1122: OK. Passes ICMP errors back to application, as per | |
394 | * 4.1.3.3. | |
395 | */ | |
396 | if (!inet->recverr) { | |
397 | if (!harderr || sk->sk_state != TCP_ESTABLISHED) | |
398 | goto out; | |
399 | } else { | |
400 | ip_icmp_error(sk, skb, err, uh->dest, info, (u8*)(uh+1)); | |
401 | } | |
402 | sk->sk_err = err; | |
403 | sk->sk_error_report(sk); | |
404 | out: | |
405 | sock_put(sk); | |
406 | } | |
407 | ||
408 | /* | |
409 | * Throw away all pending data and cancel the corking. Socket is locked. | |
410 | */ | |
411 | static void udp_flush_pending_frames(struct sock *sk) | |
412 | { | |
413 | struct udp_sock *up = udp_sk(sk); | |
414 | ||
415 | if (up->pending) { | |
416 | up->len = 0; | |
417 | up->pending = 0; | |
418 | ip_flush_pending_frames(sk); | |
419 | } | |
420 | } | |
421 | ||
422 | /* | |
423 | * Push out all pending data as one UDP datagram. Socket is locked. | |
424 | */ | |
425 | static int udp_push_pending_frames(struct sock *sk, struct udp_sock *up) | |
426 | { | |
427 | struct inet_sock *inet = inet_sk(sk); | |
428 | struct flowi *fl = &inet->cork.fl; | |
429 | struct sk_buff *skb; | |
430 | struct udphdr *uh; | |
431 | int err = 0; | |
432 | ||
433 | /* Grab the skbuff where UDP header space exists. */ | |
434 | if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) | |
435 | goto out; | |
436 | ||
437 | /* | |
438 | * Create a UDP header | |
439 | */ | |
440 | uh = skb->h.uh; | |
441 | uh->source = fl->fl_ip_sport; | |
442 | uh->dest = fl->fl_ip_dport; | |
443 | uh->len = htons(up->len); | |
444 | uh->check = 0; | |
445 | ||
446 | if (sk->sk_no_check == UDP_CSUM_NOXMIT) { | |
447 | skb->ip_summed = CHECKSUM_NONE; | |
448 | goto send; | |
449 | } | |
450 | ||
451 | if (skb_queue_len(&sk->sk_write_queue) == 1) { | |
452 | /* | |
453 | * Only one fragment on the socket. | |
454 | */ | |
84fa7933 | 455 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1da177e4 LT |
456 | skb->csum = offsetof(struct udphdr, check); |
457 | uh->check = ~csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, | |
458 | up->len, IPPROTO_UDP, 0); | |
459 | } else { | |
460 | skb->csum = csum_partial((char *)uh, | |
461 | sizeof(struct udphdr), skb->csum); | |
462 | uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, | |
463 | up->len, IPPROTO_UDP, skb->csum); | |
464 | if (uh->check == 0) | |
465 | uh->check = -1; | |
466 | } | |
467 | } else { | |
468 | unsigned int csum = 0; | |
469 | /* | |
470 | * HW checksumming can't be used when two or more | |
471 | * fragments are queued on the socket, because the csums | |
472 | * of all the sk_buffs have to be combined in software. | |
473 | */ | |
84fa7933 | 474 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1da177e4 LT |
475 | int offset = (unsigned char *)uh - skb->data; |
476 | skb->csum = skb_checksum(skb, offset, skb->len - offset, 0); | |
477 | ||
478 | skb->ip_summed = CHECKSUM_NONE; | |
479 | } else { | |
480 | skb->csum = csum_partial((char *)uh, | |
481 | sizeof(struct udphdr), skb->csum); | |
482 | } | |
483 | ||
484 | skb_queue_walk(&sk->sk_write_queue, skb) { | |
485 | csum = csum_add(csum, skb->csum); | |
486 | } | |
487 | uh->check = csum_tcpudp_magic(fl->fl4_src, fl->fl4_dst, | |
488 | up->len, IPPROTO_UDP, csum); | |
489 | if (uh->check == 0) | |
490 | uh->check = -1; | |
491 | } | |
492 | send: | |
493 | err = ip_push_pending_frames(sk); | |
494 | out: | |
495 | up->len = 0; | |
496 | up->pending = 0; | |
497 | return err; | |
498 | } | |
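/*
 * The corking machinery above is what userspace drives through the
 * UDP_CORK socket option (or MSG_MORE): appended sends accumulate in the
 * write queue and are pushed as a single datagram when the cork is
 * removed.  A minimal userspace sketch, assuming fd is a connected UDP
 * socket and with error handling omitted:
 */
#if 0 /* illustrative userspace code, not part of this file */
#include <sys/socket.h>
#include <netinet/in.h>		/* IPPROTO_UDP */
#include <linux/udp.h>		/* UDP_CORK */

static int send_one_corked_datagram(int fd)
{
	int on = 1, off = 0;

	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, "hello ", 6, 0);
	send(fd, "world", 5, 0);
	/* uncorking pushes the pending frames as one UDP datagram */
	return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
}
#endif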
499 | ||
500 | ||
734ab87f | 501 | static unsigned short udp_check(struct udphdr *uh, int len, __be32 saddr, __be32 daddr, unsigned long base) |
1da177e4 LT |
502 | { |
503 | return(csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base)); | |
504 | } | |
505 | ||
506 | int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |
507 | size_t len) | |
508 | { | |
509 | struct inet_sock *inet = inet_sk(sk); | |
510 | struct udp_sock *up = udp_sk(sk); | |
511 | int ulen = len; | |
512 | struct ipcm_cookie ipc; | |
513 | struct rtable *rt = NULL; | |
514 | int free = 0; | |
515 | int connected = 0; | |
3ca3c68e | 516 | __be32 daddr, faddr, saddr; |
734ab87f | 517 | __be16 dport; |
1da177e4 LT |
518 | u8 tos; |
519 | int err; | |
520 | int corkreq = up->corkflag || msg->msg_flags&MSG_MORE; | |
521 | ||
522 | if (len > 0xFFFF) | |
523 | return -EMSGSIZE; | |
524 | ||
525 | /* | |
526 | * Check the flags. | |
527 | */ | |
528 | ||
529 | if (msg->msg_flags&MSG_OOB) /* Mirror BSD error message compatibility */ | |
530 | return -EOPNOTSUPP; | |
531 | ||
532 | ipc.opt = NULL; | |
533 | ||
534 | if (up->pending) { | |
535 | /* | |
536 | * There are pending frames. | |
537 | * The socket lock must be held while it's corked. | |
538 | */ | |
539 | lock_sock(sk); | |
540 | if (likely(up->pending)) { | |
541 | if (unlikely(up->pending != AF_INET)) { | |
542 | release_sock(sk); | |
543 | return -EINVAL; | |
544 | } | |
545 | goto do_append_data; | |
546 | } | |
547 | release_sock(sk); | |
548 | } | |
549 | ulen += sizeof(struct udphdr); | |
550 | ||
551 | /* | |
552 | * Get and verify the address. | |
553 | */ | |
554 | if (msg->msg_name) { | |
555 | struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name; | |
556 | if (msg->msg_namelen < sizeof(*usin)) | |
557 | return -EINVAL; | |
558 | if (usin->sin_family != AF_INET) { | |
559 | if (usin->sin_family != AF_UNSPEC) | |
560 | return -EAFNOSUPPORT; | |
561 | } | |
562 | ||
563 | daddr = usin->sin_addr.s_addr; | |
564 | dport = usin->sin_port; | |
565 | if (dport == 0) | |
566 | return -EINVAL; | |
567 | } else { | |
568 | if (sk->sk_state != TCP_ESTABLISHED) | |
569 | return -EDESTADDRREQ; | |
570 | daddr = inet->daddr; | |
571 | dport = inet->dport; | |
572 | /* Open fast path for connected socket. | |
573 | Route will not be used, if at least one option is set. | |
574 | */ | |
575 | connected = 1; | |
576 | } | |
577 | ipc.addr = inet->saddr; | |
578 | ||
579 | ipc.oif = sk->sk_bound_dev_if; | |
580 | if (msg->msg_controllen) { | |
581 | err = ip_cmsg_send(msg, &ipc); | |
582 | if (err) | |
583 | return err; | |
584 | if (ipc.opt) | |
585 | free = 1; | |
586 | connected = 0; | |
587 | } | |
588 | if (!ipc.opt) | |
589 | ipc.opt = inet->opt; | |
590 | ||
591 | saddr = ipc.addr; | |
592 | ipc.addr = faddr = daddr; | |
593 | ||
594 | if (ipc.opt && ipc.opt->srr) { | |
595 | if (!daddr) | |
596 | return -EINVAL; | |
597 | faddr = ipc.opt->faddr; | |
598 | connected = 0; | |
599 | } | |
600 | tos = RT_TOS(inet->tos); | |
601 | if (sock_flag(sk, SOCK_LOCALROUTE) || | |
602 | (msg->msg_flags & MSG_DONTROUTE) || | |
603 | (ipc.opt && ipc.opt->is_strictroute)) { | |
604 | tos |= RTO_ONLINK; | |
605 | connected = 0; | |
606 | } | |
607 | ||
608 | if (MULTICAST(daddr)) { | |
609 | if (!ipc.oif) | |
610 | ipc.oif = inet->mc_index; | |
611 | if (!saddr) | |
612 | saddr = inet->mc_addr; | |
613 | connected = 0; | |
614 | } | |
615 | ||
616 | if (connected) | |
617 | rt = (struct rtable*)sk_dst_check(sk, 0); | |
618 | ||
619 | if (rt == NULL) { | |
620 | struct flowi fl = { .oif = ipc.oif, | |
621 | .nl_u = { .ip4_u = | |
622 | { .daddr = faddr, | |
623 | .saddr = saddr, | |
624 | .tos = tos } }, | |
625 | .proto = IPPROTO_UDP, | |
626 | .uli_u = { .ports = | |
627 | { .sport = inet->sport, | |
628 | .dport = dport } } }; | |
beb8d13b | 629 | security_sk_classify_flow(sk, &fl); |
1da177e4 LT |
630 | err = ip_route_output_flow(&rt, &fl, sk, !(msg->msg_flags&MSG_DONTWAIT)); |
631 | if (err) | |
632 | goto out; | |
633 | ||
634 | err = -EACCES; | |
635 | if ((rt->rt_flags & RTCF_BROADCAST) && | |
636 | !sock_flag(sk, SOCK_BROADCAST)) | |
637 | goto out; | |
638 | if (connected) | |
639 | sk_dst_set(sk, dst_clone(&rt->u.dst)); | |
640 | } | |
641 | ||
642 | if (msg->msg_flags&MSG_CONFIRM) | |
643 | goto do_confirm; | |
644 | back_from_confirm: | |
645 | ||
646 | saddr = rt->rt_src; | |
647 | if (!ipc.addr) | |
648 | daddr = ipc.addr = rt->rt_dst; | |
649 | ||
650 | lock_sock(sk); | |
651 | if (unlikely(up->pending)) { | |
652 | /* The socket is already corked while preparing it. */ | |
653 | /* ... which is an evident application bug. --ANK */ | |
654 | release_sock(sk); | |
655 | ||
64ce2073 | 656 | LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 2\n"); |
1da177e4 LT |
657 | err = -EINVAL; |
658 | goto out; | |
659 | } | |
660 | /* | |
661 | * Now cork the socket to pend data. | |
662 | */ | |
663 | inet->cork.fl.fl4_dst = daddr; | |
664 | inet->cork.fl.fl_ip_dport = dport; | |
665 | inet->cork.fl.fl4_src = saddr; | |
666 | inet->cork.fl.fl_ip_sport = inet->sport; | |
667 | up->pending = AF_INET; | |
668 | ||
669 | do_append_data: | |
670 | up->len += ulen; | |
671 | err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen, | |
672 | sizeof(struct udphdr), &ipc, rt, | |
673 | corkreq ? msg->msg_flags|MSG_MORE : msg->msg_flags); | |
674 | if (err) | |
675 | udp_flush_pending_frames(sk); | |
676 | else if (!corkreq) | |
677 | err = udp_push_pending_frames(sk, up); | |
678 | release_sock(sk); | |
679 | ||
680 | out: | |
681 | ip_rt_put(rt); | |
682 | if (free) | |
683 | kfree(ipc.opt); | |
684 | if (!err) { | |
685 | UDP_INC_STATS_USER(UDP_MIB_OUTDATAGRAMS); | |
686 | return len; | |
687 | } | |
81aa646c MB |
688 | /* |
689 | * ENOBUFS = no kernel mem, SOCK_NOSPACE = no sndbuf space. Reporting | |
690 | * ENOBUFS might not be good (it's not tunable per se), but otherwise | |
691 | * we don't have a good statistic (IpOutDiscards but it can be too many | |
692 | * things). We could add another new stat but at least for now that | |
693 | * seems like overkill. | |
694 | */ | |
695 | if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) { | |
696 | UDP_INC_STATS_USER(UDP_MIB_SNDBUFERRORS); | |
697 | } | |
1da177e4 LT |
698 | return err; |
699 | ||
700 | do_confirm: | |
701 | dst_confirm(&rt->u.dst); | |
702 | if (!(msg->msg_flags&MSG_PROBE) || len) | |
703 | goto back_from_confirm; | |
704 | err = 0; | |
705 | goto out; | |
706 | } | |
707 | ||
708 | static int udp_sendpage(struct sock *sk, struct page *page, int offset, | |
709 | size_t size, int flags) | |
710 | { | |
711 | struct udp_sock *up = udp_sk(sk); | |
712 | int ret; | |
713 | ||
714 | if (!up->pending) { | |
715 | struct msghdr msg = { .msg_flags = flags|MSG_MORE }; | |
716 | ||
717 | /* Call udp_sendmsg to specify destination address which | |
718 | * sendpage interface can't pass. | |
719 | * This will succeed only when the socket is connected. | |
720 | */ | |
721 | ret = udp_sendmsg(NULL, sk, &msg, 0); | |
722 | if (ret < 0) | |
723 | return ret; | |
724 | } | |
725 | ||
726 | lock_sock(sk); | |
727 | ||
728 | if (unlikely(!up->pending)) { | |
729 | release_sock(sk); | |
730 | ||
64ce2073 | 731 | LIMIT_NETDEBUG(KERN_DEBUG "udp cork app bug 3\n"); |
1da177e4 LT |
732 | return -EINVAL; |
733 | } | |
734 | ||
735 | ret = ip_append_page(sk, page, offset, size, flags); | |
736 | if (ret == -EOPNOTSUPP) { | |
737 | release_sock(sk); | |
738 | return sock_no_sendpage(sk->sk_socket, page, offset, | |
739 | size, flags); | |
740 | } | |
741 | if (ret < 0) { | |
742 | udp_flush_pending_frames(sk); | |
743 | goto out; | |
744 | } | |
745 | ||
746 | up->len += size; | |
747 | if (!(up->corkflag || (flags&MSG_MORE))) | |
748 | ret = udp_push_pending_frames(sk, up); | |
749 | if (!ret) | |
750 | ret = size; | |
751 | out: | |
752 | release_sock(sk); | |
753 | return ret; | |
754 | } | |
755 | ||
756 | /* | |
757 | * IOCTL requests applicable to the UDP protocol | |
758 | */ | |
759 | ||
760 | int udp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |
761 | { | |
762 | switch(cmd) | |
763 | { | |
764 | case SIOCOUTQ: | |
765 | { | |
766 | int amount = atomic_read(&sk->sk_wmem_alloc); | |
767 | return put_user(amount, (int __user *)arg); | |
768 | } | |
769 | ||
770 | case SIOCINQ: | |
771 | { | |
772 | struct sk_buff *skb; | |
773 | unsigned long amount; | |
774 | ||
775 | amount = 0; | |
208d8984 | 776 | spin_lock_bh(&sk->sk_receive_queue.lock); |
1da177e4 LT |
777 | skb = skb_peek(&sk->sk_receive_queue); |
778 | if (skb != NULL) { | |
779 | /* | |
780 | * We will only return the amount | |
781 | * of this packet since that is all | |
782 | * that will be read. | |
783 | */ | |
784 | amount = skb->len - sizeof(struct udphdr); | |
785 | } | |
208d8984 | 786 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
1da177e4 LT |
787 | return put_user(amount, (int __user *)arg); |
788 | } | |
789 | ||
790 | default: | |
791 | return -ENOIOCTLCMD; | |
792 | } | |
793 | return(0); | |
794 | } | |
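/*
 * Seen from userspace, these are the SIOCINQ/SIOCOUTQ ioctls: SIOCINQ
 * reports the payload length of the next queued datagram (UDP header
 * excluded, as computed above) and SIOCOUTQ the bytes still sitting in
 * the send buffer.  A minimal sketch, assuming fd is a UDP socket and
 * ignoring error returns:
 */
#if 0 /* illustrative userspace code, not part of this file */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ, SIOCOUTQ */

static void show_queue_sizes(int fd)
{
	int inq = 0, outq = 0;

	ioctl(fd, SIOCINQ, &inq);	/* size of the next datagram to read */
	ioctl(fd, SIOCOUTQ, &outq);	/* unsent bytes in the send buffer */
	printf("next datagram: %d bytes, unsent: %d bytes\n", inq, outq);
}
#endif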
795 | ||
796 | static __inline__ int __udp_checksum_complete(struct sk_buff *skb) | |
797 | { | |
fb286bb2 | 798 | return __skb_checksum_complete(skb); |
1da177e4 LT |
799 | } |
800 | ||
801 | static __inline__ int udp_checksum_complete(struct sk_buff *skb) | |
802 | { | |
803 | return skb->ip_summed != CHECKSUM_UNNECESSARY && | |
804 | __udp_checksum_complete(skb); | |
805 | } | |
806 | ||
807 | /* | |
808 | * This should be easy, if there is something there we | |
809 | * return it, otherwise we block. | |
810 | */ | |
811 | ||
812 | static int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |
813 | size_t len, int noblock, int flags, int *addr_len) | |
814 | { | |
815 | struct inet_sock *inet = inet_sk(sk); | |
816 | struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name; | |
817 | struct sk_buff *skb; | |
818 | int copied, err; | |
819 | ||
820 | /* | |
821 | * Check any passed addresses | |
822 | */ | |
823 | if (addr_len) | |
824 | *addr_len=sizeof(*sin); | |
825 | ||
826 | if (flags & MSG_ERRQUEUE) | |
827 | return ip_recv_error(sk, msg, len); | |
828 | ||
829 | try_again: | |
830 | skb = skb_recv_datagram(sk, flags, noblock, &err); | |
831 | if (!skb) | |
832 | goto out; | |
833 | ||
834 | copied = skb->len - sizeof(struct udphdr); | |
835 | if (copied > len) { | |
836 | copied = len; | |
837 | msg->msg_flags |= MSG_TRUNC; | |
838 | } | |
839 | ||
840 | if (skb->ip_summed==CHECKSUM_UNNECESSARY) { | |
841 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, | |
842 | copied); | |
843 | } else if (msg->msg_flags&MSG_TRUNC) { | |
844 | if (__udp_checksum_complete(skb)) | |
845 | goto csum_copy_err; | |
846 | err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov, | |
847 | copied); | |
848 | } else { | |
849 | err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov); | |
850 | ||
851 | if (err == -EINVAL) | |
852 | goto csum_copy_err; | |
853 | } | |
854 | ||
855 | if (err) | |
856 | goto out_free; | |
857 | ||
858 | sock_recv_timestamp(msg, sk, skb); | |
859 | ||
860 | /* Copy the address. */ | |
861 | if (sin) | |
862 | { | |
863 | sin->sin_family = AF_INET; | |
864 | sin->sin_port = skb->h.uh->source; | |
865 | sin->sin_addr.s_addr = skb->nh.iph->saddr; | |
866 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | |
867 | } | |
868 | if (inet->cmsg_flags) | |
869 | ip_cmsg_recv(msg, skb); | |
870 | ||
871 | err = copied; | |
872 | if (flags & MSG_TRUNC) | |
873 | err = skb->len - sizeof(struct udphdr); | |
874 | ||
875 | out_free: | |
876 | skb_free_datagram(sk, skb); | |
877 | out: | |
878 | return err; | |
879 | ||
880 | csum_copy_err: | |
881 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | |
882 | ||
3305b80c | 883 | skb_kill_datagram(sk, skb, flags); |
1da177e4 LT |
884 | |
885 | if (noblock) | |
886 | return -EAGAIN; | |
887 | goto try_again; | |
888 | } | |
889 | ||
890 | ||
891 | int udp_disconnect(struct sock *sk, int flags) | |
892 | { | |
893 | struct inet_sock *inet = inet_sk(sk); | |
894 | /* | |
895 | * 1003.1g - break association. | |
896 | */ | |
897 | ||
898 | sk->sk_state = TCP_CLOSE; | |
899 | inet->daddr = 0; | |
900 | inet->dport = 0; | |
901 | sk->sk_bound_dev_if = 0; | |
902 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | |
903 | inet_reset_saddr(sk); | |
904 | ||
905 | if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) { | |
906 | sk->sk_prot->unhash(sk); | |
907 | inet->sport = 0; | |
908 | } | |
909 | sk_dst_reset(sk); | |
910 | return 0; | |
911 | } | |
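/*
 * udp_disconnect() is reached when an application dissolves the peer
 * association of a connected UDP socket by calling connect() with an
 * AF_UNSPEC address, as POSIX allows for datagram sockets.  A minimal
 * userspace sketch:
 */
#if 0 /* illustrative userspace code, not part of this file */
#include <string.h>
#include <sys/socket.h>

static int udp_dissolve_association(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}
#endif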
912 | ||
913 | static void udp_close(struct sock *sk, long timeout) | |
914 | { | |
915 | sk_common_release(sk); | |
916 | } | |
917 | ||
918 | /* return: | |
919 | * 1 if the UDP system should process it | |
920 | * 0 if we should drop this packet | |
921 | * -1 if it should get processed by xfrm4_rcv_encap | |
922 | */ | |
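/*
 * Framing used by ESP-in-UDP encapsulation (RFC 3948 for
 * UDP_ENCAP_ESPINUDP): a one-byte 0xff payload is a NAT-keepalive and is
 * eaten; IKE packets carry a four-byte all-zero "non-ESP marker" where an
 * ESP packet has a non-zero SPI, so they are passed up to the daemon; the
 * older UDP_ENCAP_ESPINUDP_NON_IKE draft format instead prefixes the ESP
 * header with an eight-byte all-zero marker.
 */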
923 | static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb) | |
924 | { | |
925 | #ifndef CONFIG_XFRM | |
926 | return 1; | |
927 | #else | |
928 | struct udp_sock *up = udp_sk(sk); | |
929 | struct udphdr *uh = skb->h.uh; | |
930 | struct iphdr *iph; | |
931 | int iphlen, len; | |
932 | ||
933 | __u8 *udpdata = (__u8 *)uh + sizeof(struct udphdr); | |
734ab87f | 934 | __be32 *udpdata32 = (__be32 *)udpdata; |
1da177e4 LT |
935 | __u16 encap_type = up->encap_type; |
936 | ||
937 | /* if we're overly short, let UDP handle it */ | |
938 | if (udpdata > skb->tail) | |
939 | return 1; | |
940 | ||
941 | /* if this is not an encapsulation socket, then just return now */ | |
942 | if (!encap_type) | |
943 | return 1; | |
944 | ||
945 | len = skb->tail - udpdata; | |
946 | ||
947 | switch (encap_type) { | |
948 | default: | |
949 | case UDP_ENCAP_ESPINUDP: | |
950 | /* Check if this is a keepalive packet. If so, eat it. */ | |
951 | if (len == 1 && udpdata[0] == 0xff) { | |
952 | return 0; | |
953 | } else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0 ) { | |
954 | /* ESP Packet without Non-ESP header */ | |
955 | len = sizeof(struct udphdr); | |
956 | } else | |
957 | /* Must be an IKE packet.. pass it through */ | |
958 | return 1; | |
959 | break; | |
960 | case UDP_ENCAP_ESPINUDP_NON_IKE: | |
961 | /* Check if this is a keepalive packet. If so, eat it. */ | |
962 | if (len == 1 && udpdata[0] == 0xff) { | |
963 | return 0; | |
964 | } else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) && | |
965 | udpdata32[0] == 0 && udpdata32[1] == 0) { | |
966 | ||
967 | /* ESP Packet with Non-IKE marker */ | |
968 | len = sizeof(struct udphdr) + 2 * sizeof(u32); | |
969 | } else | |
970 | /* Must be an IKE packet.. pass it through */ | |
971 | return 1; | |
972 | break; | |
973 | } | |
974 | ||
975 | /* At this point we are sure that this is an ESPinUDP packet, | |
976 | * so we need to remove 'len' bytes from the packet (the UDP | |
977 | * header and optional ESP marker bytes) and then modify the | |
978 | * protocol to ESP, and then call into the transform receiver. | |
979 | */ | |
4d78b6c7 HX |
980 | if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) |
981 | return 0; | |
1da177e4 LT |
982 | |
983 | /* Now we can update and verify the packet length... */ | |
984 | iph = skb->nh.iph; | |
985 | iphlen = iph->ihl << 2; | |
986 | iph->tot_len = htons(ntohs(iph->tot_len) - len); | |
987 | if (skb->len < iphlen + len) { | |
988 | /* packet is too small!?! */ | |
989 | return 0; | |
990 | } | |
991 | ||
992 | /* pull the data buffer up to the ESP header and set the | |
993 | * transport header to point to ESP. Keep UDP on the stack | |
994 | * for later. | |
995 | */ | |
996 | skb->h.raw = skb_pull(skb, len); | |
997 | ||
998 | /* modify the protocol (it's ESP!) */ | |
999 | iph->protocol = IPPROTO_ESP; | |
1000 | ||
1001 | /* and let the caller know to send this into the ESP processor... */ | |
1002 | return -1; | |
1003 | #endif | |
1004 | } | |
1005 | ||
1006 | /* returns: | |
1007 | * -1: error | |
1008 | * 0: success | |
1009 | * >0: "udp encap" protocol resubmission | |
1010 | * | |
1011 | * Note that in the success and error cases, the skb is assumed to | |
1012 | * have either been requeued or freed. | |
1013 | */ | |
1014 | static int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb) | |
1015 | { | |
1016 | struct udp_sock *up = udp_sk(sk); | |
81aa646c | 1017 | int rc; |
1da177e4 LT |
1018 | |
1019 | /* | |
1020 | * Charge it to the socket, dropping if the queue is full. | |
1021 | */ | |
1022 | if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { | |
1023 | kfree_skb(skb); | |
1024 | return -1; | |
1025 | } | |
b59c2701 | 1026 | nf_reset(skb); |
1da177e4 LT |
1027 | |
1028 | if (up->encap_type) { | |
1029 | /* | |
1030 | * This is an encapsulation socket, so let's see if this is | |
1031 | * an encapsulated packet. | |
1032 | * If it's a keepalive packet, then just eat it. | |
1033 | * If it's an encapsulated packet, then pass it to the | |
1034 | * IPsec xfrm input and return the response | |
1035 | * appropriately. Otherwise, just fall through and | |
1036 | * pass this up the UDP socket. | |
1037 | */ | |
1038 | int ret; | |
1039 | ||
1040 | ret = udp_encap_rcv(sk, skb); | |
1041 | if (ret == 0) { | |
1042 | /* Eat the packet .. */ | |
1043 | kfree_skb(skb); | |
1044 | return 0; | |
1045 | } | |
1046 | if (ret < 0) { | |
1047 | /* process the ESP packet */ | |
1048 | ret = xfrm4_rcv_encap(skb, up->encap_type); | |
1049 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS); | |
1050 | return -ret; | |
1051 | } | |
1052 | /* FALLTHROUGH -- it's a UDP Packet */ | |
1053 | } | |
1054 | ||
1055 | if (sk->sk_filter && skb->ip_summed != CHECKSUM_UNNECESSARY) { | |
1056 | if (__udp_checksum_complete(skb)) { | |
1057 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | |
1058 | kfree_skb(skb); | |
1059 | return -1; | |
1060 | } | |
1061 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1062 | } | |
1063 | ||
81aa646c MB |
1064 | if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) { |
1065 | /* Note that an ENOMEM error is charged twice */ | |
1066 | if (rc == -ENOMEM) | |
1067 | UDP_INC_STATS_BH(UDP_MIB_RCVBUFERRORS); | |
1da177e4 LT |
1068 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); |
1069 | kfree_skb(skb); | |
1070 | return -1; | |
1071 | } | |
1072 | UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS); | |
1073 | return 0; | |
1074 | } | |
1075 | ||
1076 | /* | |
1077 | * Multicasts and broadcasts go to each listener. | |
1078 | * | |
1079 | * Note: called only from the BH handler context, | |
1080 | * so we don't need to lock the hashes. | |
1081 | */ | |
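/*
 * Delivery note: every matching listener except the last receives its own
 * clone of the skb; the last match consumes the original.  If no socket
 * matches, the packet is freed silently -- no ICMP port unreachable is
 * generated for multicast or broadcast datagrams.
 */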
1082 | static int udp_v4_mcast_deliver(struct sk_buff *skb, struct udphdr *uh, | |
734ab87f | 1083 | __be32 saddr, __be32 daddr) |
1da177e4 LT |
1084 | { |
1085 | struct sock *sk; | |
1086 | int dif; | |
1087 | ||
1088 | read_lock(&udp_hash_lock); | |
1089 | sk = sk_head(&udp_hash[ntohs(uh->dest) & (UDP_HTABLE_SIZE - 1)]); | |
1090 | dif = skb->dev->ifindex; | |
1091 | sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif); | |
1092 | if (sk) { | |
1093 | struct sock *sknext = NULL; | |
1094 | ||
1095 | do { | |
1096 | struct sk_buff *skb1 = skb; | |
1097 | ||
1098 | sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr, | |
1099 | uh->source, saddr, dif); | |
1100 | if(sknext) | |
1101 | skb1 = skb_clone(skb, GFP_ATOMIC); | |
1102 | ||
1103 | if(skb1) { | |
1104 | int ret = udp_queue_rcv_skb(sk, skb1); | |
1105 | if (ret > 0) | |
1106 | /* we should probably re-process instead | |
1107 | * of dropping packets here. */ | |
1108 | kfree_skb(skb1); | |
1109 | } | |
1110 | sk = sknext; | |
1111 | } while(sknext); | |
1112 | } else | |
1113 | kfree_skb(skb); | |
1114 | read_unlock(&udp_hash_lock); | |
1115 | return 0; | |
1116 | } | |
1117 | ||
1118 | /* Initialize the UDP checksum state. If this leaves skb->ip_summed as | |
1119 | * CHECKSUM_UNNECESSARY, no more checks are required. Otherwise, csum | |
1120 | * completion requires checksumming the packet body, including the UDP | |
1121 | * header, and folding it into skb->csum. | |
1122 | */ | |
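/*
 * Note: uh->check == 0 means the sender did not compute a checksum, which
 * is legal for UDP over IPv4, so the packet is accepted as
 * CHECKSUM_UNNECESSARY.  With CHECKSUM_COMPLETE the device has already
 * summed the whole payload, so folding it with the pseudo-header is
 * enough to verify it here.  Otherwise skb->csum is seeded with the
 * pseudo-header sum, and a later __skb_checksum_complete() adds the
 * datagram itself (checksum field included); a valid packet folds to
 * zero.
 */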
65a45441 | 1123 | static void udp_checksum_init(struct sk_buff *skb, struct udphdr *uh, |
734ab87f | 1124 | unsigned short ulen, __be32 saddr, __be32 daddr) |
1da177e4 LT |
1125 | { |
1126 | if (uh->check == 0) { | |
1127 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
84fa7933 | 1128 | } else if (skb->ip_summed == CHECKSUM_COMPLETE) { |
1da177e4 | 1129 | if (!udp_check(uh, ulen, saddr, daddr, skb->csum)) |
fb286bb2 | 1130 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1da177e4 LT |
1131 | } |
1132 | if (skb->ip_summed != CHECKSUM_UNNECESSARY) | |
1133 | skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); | |
1134 | /* Probably, we should checksum udp header (it should be in cache | |
1135 | * in any case) and data in tiny packets (< rx copybreak). | |
1136 | */ | |
1da177e4 LT |
1137 | } |
1138 | ||
1139 | /* | |
1140 | * All we need to do is get the socket, and then do a checksum. | |
1141 | */ | |
1142 | ||
1143 | int udp_rcv(struct sk_buff *skb) | |
1144 | { | |
1145 | struct sock *sk; | |
1146 | struct udphdr *uh; | |
1147 | unsigned short ulen; | |
1148 | struct rtable *rt = (struct rtable*)skb->dst; | |
734ab87f AV |
1149 | __be32 saddr = skb->nh.iph->saddr; |
1150 | __be32 daddr = skb->nh.iph->daddr; | |
1da177e4 LT |
1151 | int len = skb->len; |
1152 | ||
1153 | /* | |
1154 | * Validate the packet and the UDP length. | |
1155 | */ | |
1156 | if (!pskb_may_pull(skb, sizeof(struct udphdr))) | |
1157 | goto no_header; | |
1158 | ||
1159 | uh = skb->h.uh; | |
1160 | ||
1161 | ulen = ntohs(uh->len); | |
1162 | ||
1163 | if (ulen > len || ulen < sizeof(*uh)) | |
1164 | goto short_packet; | |
1165 | ||
e308e25c | 1166 | if (pskb_trim_rcsum(skb, ulen)) |
1da177e4 LT |
1167 | goto short_packet; |
1168 | ||
65a45441 | 1169 | udp_checksum_init(skb, uh, ulen, saddr, daddr); |
1da177e4 LT |
1170 | |
1171 | if(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST)) | |
1172 | return udp_v4_mcast_deliver(skb, uh, saddr, daddr); | |
1173 | ||
1174 | sk = udp_v4_lookup(saddr, uh->source, daddr, uh->dest, skb->dev->ifindex); | |
1175 | ||
1176 | if (sk != NULL) { | |
1177 | int ret = udp_queue_rcv_skb(sk, skb); | |
1178 | sock_put(sk); | |
1179 | ||
1180 | /* a return value > 0 means to resubmit the input, but | |
1181 | * if it wants the return to be -protocol, or 0 | |
1182 | */ | |
1183 | if (ret > 0) | |
1184 | return -ret; | |
1185 | return 0; | |
1186 | } | |
1187 | ||
1188 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | |
1189 | goto drop; | |
b59c2701 | 1190 | nf_reset(skb); |
1da177e4 LT |
1191 | |
1192 | /* No socket. Drop packet silently, if checksum is wrong */ | |
1193 | if (udp_checksum_complete(skb)) | |
1194 | goto csum_error; | |
1195 | ||
1196 | UDP_INC_STATS_BH(UDP_MIB_NOPORTS); | |
1197 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); | |
1198 | ||
1199 | /* | |
1200 | * Hmm. We got a UDP packet to a port to which we | |
1201 | * don't wanna listen. Ignore it. | |
1202 | */ | |
1203 | kfree_skb(skb); | |
1204 | return(0); | |
1205 | ||
1206 | short_packet: | |
64ce2073 PM |
1207 | LIMIT_NETDEBUG(KERN_DEBUG "UDP: short packet: From %u.%u.%u.%u:%u %d/%d to %u.%u.%u.%u:%u\n", |
1208 | NIPQUAD(saddr), | |
1209 | ntohs(uh->source), | |
1210 | ulen, | |
1211 | len, | |
1212 | NIPQUAD(daddr), | |
1213 | ntohs(uh->dest)); | |
1da177e4 LT |
1214 | no_header: |
1215 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | |
1216 | kfree_skb(skb); | |
1217 | return(0); | |
1218 | ||
1219 | csum_error: | |
1220 | /* | |
1221 | * RFC1122: OK. Discards the bad packet silently (as far as | |
1222 | * the network is concerned, anyway) as per 4.1.3.4 (MUST). | |
1223 | */ | |
64ce2073 PM |
1224 | LIMIT_NETDEBUG(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n", |
1225 | NIPQUAD(saddr), | |
1226 | ntohs(uh->source), | |
1227 | NIPQUAD(daddr), | |
1228 | ntohs(uh->dest), | |
1229 | ulen); | |
1da177e4 LT |
1230 | drop: |
1231 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | |
1232 | kfree_skb(skb); | |
1233 | return(0); | |
1234 | } | |
1235 | ||
1236 | static int udp_destroy_sock(struct sock *sk) | |
1237 | { | |
1238 | lock_sock(sk); | |
1239 | udp_flush_pending_frames(sk); | |
1240 | release_sock(sk); | |
1241 | return 0; | |
1242 | } | |
1243 | ||
1244 | /* | |
1245 | * Socket option code for UDP | |
1246 | */ | |
3fdadf7d | 1247 | static int do_udp_setsockopt(struct sock *sk, int level, int optname, |
1da177e4 LT |
1248 | char __user *optval, int optlen) |
1249 | { | |
1250 | struct udp_sock *up = udp_sk(sk); | |
1251 | int val; | |
1252 | int err = 0; | |
1253 | ||
1da177e4 LT |
1254 | if(optlen<sizeof(int)) |
1255 | return -EINVAL; | |
1256 | ||
1257 | if (get_user(val, (int __user *)optval)) | |
1258 | return -EFAULT; | |
1259 | ||
1260 | switch(optname) { | |
1261 | case UDP_CORK: | |
1262 | if (val != 0) { | |
1263 | up->corkflag = 1; | |
1264 | } else { | |
1265 | up->corkflag = 0; | |
1266 | lock_sock(sk); | |
1267 | udp_push_pending_frames(sk, up); | |
1268 | release_sock(sk); | |
1269 | } | |
1270 | break; | |
1271 | ||
1272 | case UDP_ENCAP: | |
1273 | switch (val) { | |
1274 | case 0: | |
1275 | case UDP_ENCAP_ESPINUDP: | |
1276 | case UDP_ENCAP_ESPINUDP_NON_IKE: | |
1277 | up->encap_type = val; | |
1278 | break; | |
1279 | default: | |
1280 | err = -ENOPROTOOPT; | |
1281 | break; | |
1282 | } | |
1283 | break; | |
1284 | ||
1285 | default: | |
1286 | err = -ENOPROTOOPT; | |
1287 | break; | |
1288 | }; | |
1289 | ||
1290 | return err; | |
1291 | } | |
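/*
 * The UDP_ENCAP case above is how an IKE/NAT-T daemon enables ESP-in-UDP
 * decapsulation on its UDP port 4500 socket.  A minimal userspace sketch,
 * assuming fd is already bound to that port and ignoring errors:
 */
#if 0 /* illustrative userspace code, not part of this file */
#include <sys/socket.h>
#include <netinet/in.h>		/* IPPROTO_UDP */
#include <linux/udp.h>		/* UDP_ENCAP, UDP_ENCAP_ESPINUDP */

static int enable_nat_traversal(int fd)
{
	int encap = UDP_ENCAP_ESPINUDP;

	return setsockopt(fd, IPPROTO_UDP, UDP_ENCAP, &encap, sizeof(encap));
}
#endif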
1292 | ||
3fdadf7d DM |
1293 | static int udp_setsockopt(struct sock *sk, int level, int optname, |
1294 | char __user *optval, int optlen) | |
1295 | { | |
1296 | if (level != SOL_UDP) | |
1297 | return ip_setsockopt(sk, level, optname, optval, optlen); | |
1298 | return do_udp_setsockopt(sk, level, optname, optval, optlen); | |
1299 | } | |
1300 | ||
1301 | #ifdef CONFIG_COMPAT | |
1302 | static int compat_udp_setsockopt(struct sock *sk, int level, int optname, | |
543d9cfe | 1303 | char __user *optval, int optlen) |
3fdadf7d DM |
1304 | { |
1305 | if (level != SOL_UDP) | |
543d9cfe | 1306 | return compat_ip_setsockopt(sk, level, optname, optval, optlen); |
3fdadf7d DM |
1307 | return do_udp_setsockopt(sk, level, optname, optval, optlen); |
1308 | } | |
1309 | #endif | |
1310 | ||
1311 | static int do_udp_getsockopt(struct sock *sk, int level, int optname, | |
1da177e4 LT |
1312 | char __user *optval, int __user *optlen) |
1313 | { | |
1314 | struct udp_sock *up = udp_sk(sk); | |
1315 | int val, len; | |
1316 | ||
1da177e4 LT |
1317 | if(get_user(len,optlen)) |
1318 | return -EFAULT; | |
1319 | ||
1320 | len = min_t(unsigned int, len, sizeof(int)); | |
1321 | ||
1322 | if(len < 0) | |
1323 | return -EINVAL; | |
1324 | ||
1325 | switch(optname) { | |
1326 | case UDP_CORK: | |
1327 | val = up->corkflag; | |
1328 | break; | |
1329 | ||
1330 | case UDP_ENCAP: | |
1331 | val = up->encap_type; | |
1332 | break; | |
1333 | ||
1334 | default: | |
1335 | return -ENOPROTOOPT; | |
1336 | }; | |
1337 | ||
1338 | if(put_user(len, optlen)) | |
1339 | return -EFAULT; | |
1340 | if(copy_to_user(optval, &val,len)) | |
1341 | return -EFAULT; | |
1342 | return 0; | |
1343 | } | |
1344 | ||
3fdadf7d DM |
1345 | static int udp_getsockopt(struct sock *sk, int level, int optname, |
1346 | char __user *optval, int __user *optlen) | |
1347 | { | |
1348 | if (level != SOL_UDP) | |
1349 | return ip_getsockopt(sk, level, optname, optval, optlen); | |
1350 | return do_udp_getsockopt(sk, level, optname, optval, optlen); | |
1351 | } | |
1352 | ||
1353 | #ifdef CONFIG_COMPAT | |
1354 | static int compat_udp_getsockopt(struct sock *sk, int level, int optname, | |
543d9cfe | 1355 | char __user *optval, int __user *optlen) |
3fdadf7d DM |
1356 | { |
1357 | if (level != SOL_UDP) | |
543d9cfe | 1358 | return compat_ip_getsockopt(sk, level, optname, optval, optlen); |
3fdadf7d DM |
1359 | return do_udp_getsockopt(sk, level, optname, optval, optlen); |
1360 | } | |
1361 | #endif | |
1da177e4 LT |
1362 | /** |
1363 | * udp_poll - wait for a UDP event. | |
1364 | * @file - file struct | |
1365 | * @sock - socket | |
1366 | * @wait - poll table | |
1367 | * | |
1368 | * This is the same as datagram poll, except for the special case of | |
1369 | * blocking sockets. If an application is using a blocking fd and a | |
1370 | * packet with a checksum error is in the queue, select() could report | |
1371 | * data available, but the subsequent read would then block. Add | |
1372 | * special-case code to work around these arguably broken | |
1373 | * applications. | |
1374 | */ | |
1375 | unsigned int udp_poll(struct file *file, struct socket *sock, poll_table *wait) | |
1376 | { | |
1377 | unsigned int mask = datagram_poll(file, sock, wait); | |
1378 | struct sock *sk = sock->sk; | |
1379 | ||
1380 | /* Check for false positives due to checksum errors */ | |
1381 | if ( (mask & POLLRDNORM) && | |
1382 | !(file->f_flags & O_NONBLOCK) && | |
1383 | !(sk->sk_shutdown & RCV_SHUTDOWN)){ | |
1384 | struct sk_buff_head *rcvq = &sk->sk_receive_queue; | |
1385 | struct sk_buff *skb; | |
1386 | ||
208d8984 | 1387 | spin_lock_bh(&rcvq->lock); |
1da177e4 LT |
1388 | while ((skb = skb_peek(rcvq)) != NULL) { |
1389 | if (udp_checksum_complete(skb)) { | |
1390 | UDP_INC_STATS_BH(UDP_MIB_INERRORS); | |
1391 | __skb_unlink(skb, rcvq); | |
1392 | kfree_skb(skb); | |
1393 | } else { | |
1394 | skb->ip_summed = CHECKSUM_UNNECESSARY; | |
1395 | break; | |
1396 | } | |
1397 | } | |
208d8984 | 1398 | spin_unlock_bh(&rcvq->lock); |
1da177e4 LT |
1399 | |
1400 | /* nothing to see, move along */ | |
1401 | if (skb == NULL) | |
1402 | mask &= ~(POLLIN | POLLRDNORM); | |
1403 | } | |
1404 | ||
1405 | return mask; | |
1406 | ||
1407 | } | |
1408 | ||
1409 | struct proto udp_prot = { | |
543d9cfe ACM |
1410 | .name = "UDP", |
1411 | .owner = THIS_MODULE, | |
1412 | .close = udp_close, | |
1413 | .connect = ip4_datagram_connect, | |
1414 | .disconnect = udp_disconnect, | |
1415 | .ioctl = udp_ioctl, | |
1416 | .destroy = udp_destroy_sock, | |
1417 | .setsockopt = udp_setsockopt, | |
1418 | .getsockopt = udp_getsockopt, | |
1419 | .sendmsg = udp_sendmsg, | |
1420 | .recvmsg = udp_recvmsg, | |
1421 | .sendpage = udp_sendpage, | |
1422 | .backlog_rcv = udp_queue_rcv_skb, | |
1423 | .hash = udp_v4_hash, | |
1424 | .unhash = udp_v4_unhash, | |
1425 | .get_port = udp_v4_get_port, | |
1426 | .obj_size = sizeof(struct udp_sock), | |
3fdadf7d | 1427 | #ifdef CONFIG_COMPAT |
543d9cfe ACM |
1428 | .compat_setsockopt = compat_udp_setsockopt, |
1429 | .compat_getsockopt = compat_udp_getsockopt, | |
3fdadf7d | 1430 | #endif |
1da177e4 LT |
1431 | }; |
1432 | ||
1433 | /* ------------------------------------------------------------------------ */ | |
1434 | #ifdef CONFIG_PROC_FS | |
1435 | ||
1436 | static struct sock *udp_get_first(struct seq_file *seq) | |
1437 | { | |
1438 | struct sock *sk; | |
1439 | struct udp_iter_state *state = seq->private; | |
1440 | ||
1441 | for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) { | |
1442 | struct hlist_node *node; | |
1443 | sk_for_each(sk, node, &udp_hash[state->bucket]) { | |
1444 | if (sk->sk_family == state->family) | |
1445 | goto found; | |
1446 | } | |
1447 | } | |
1448 | sk = NULL; | |
1449 | found: | |
1450 | return sk; | |
1451 | } | |
1452 | ||
1453 | static struct sock *udp_get_next(struct seq_file *seq, struct sock *sk) | |
1454 | { | |
1455 | struct udp_iter_state *state = seq->private; | |
1456 | ||
1457 | do { | |
1458 | sk = sk_next(sk); | |
1459 | try_again: | |
1460 | ; | |
1461 | } while (sk && sk->sk_family != state->family); | |
1462 | ||
1463 | if (!sk && ++state->bucket < UDP_HTABLE_SIZE) { | |
1464 | sk = sk_head(&udp_hash[state->bucket]); | |
1465 | goto try_again; | |
1466 | } | |
1467 | return sk; | |
1468 | } | |
1469 | ||
1470 | static struct sock *udp_get_idx(struct seq_file *seq, loff_t pos) | |
1471 | { | |
1472 | struct sock *sk = udp_get_first(seq); | |
1473 | ||
1474 | if (sk) | |
1475 | while(pos && (sk = udp_get_next(seq, sk)) != NULL) | |
1476 | --pos; | |
1477 | return pos ? NULL : sk; | |
1478 | } | |
1479 | ||
1480 | static void *udp_seq_start(struct seq_file *seq, loff_t *pos) | |
1481 | { | |
1482 | read_lock(&udp_hash_lock); | |
1483 | return *pos ? udp_get_idx(seq, *pos-1) : (void *)1; | |
1484 | } | |
1485 | ||
1486 | static void *udp_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
1487 | { | |
1488 | struct sock *sk; | |
1489 | ||
1490 | if (v == (void *)1) | |
1491 | sk = udp_get_idx(seq, 0); | |
1492 | else | |
1493 | sk = udp_get_next(seq, v); | |
1494 | ||
1495 | ++*pos; | |
1496 | return sk; | |
1497 | } | |
1498 | ||
1499 | static void udp_seq_stop(struct seq_file *seq, void *v) | |
1500 | { | |
1501 | read_unlock(&udp_hash_lock); | |
1502 | } | |
1503 | ||
1504 | static int udp_seq_open(struct inode *inode, struct file *file) | |
1505 | { | |
1506 | struct udp_seq_afinfo *afinfo = PDE(inode)->data; | |
1507 | struct seq_file *seq; | |
1508 | int rc = -ENOMEM; | |
0da974f4 | 1509 | struct udp_iter_state *s = kzalloc(sizeof(*s), GFP_KERNEL); |
1da177e4 LT |
1510 | |
1511 | if (!s) | |
1512 | goto out; | |
1da177e4 LT |
1513 | s->family = afinfo->family; |
1514 | s->seq_ops.start = udp_seq_start; | |
1515 | s->seq_ops.next = udp_seq_next; | |
1516 | s->seq_ops.show = afinfo->seq_show; | |
1517 | s->seq_ops.stop = udp_seq_stop; | |
1518 | ||
1519 | rc = seq_open(file, &s->seq_ops); | |
1520 | if (rc) | |
1521 | goto out_kfree; | |
1522 | ||
1523 | seq = file->private_data; | |
1524 | seq->private = s; | |
1525 | out: | |
1526 | return rc; | |
1527 | out_kfree: | |
1528 | kfree(s); | |
1529 | goto out; | |
1530 | } | |
1531 | ||
1532 | /* ------------------------------------------------------------------------ */ | |
1533 | int udp_proc_register(struct udp_seq_afinfo *afinfo) | |
1534 | { | |
1535 | struct proc_dir_entry *p; | |
1536 | int rc = 0; | |
1537 | ||
1538 | if (!afinfo) | |
1539 | return -EINVAL; | |
1540 | afinfo->seq_fops->owner = afinfo->owner; | |
1541 | afinfo->seq_fops->open = udp_seq_open; | |
1542 | afinfo->seq_fops->read = seq_read; | |
1543 | afinfo->seq_fops->llseek = seq_lseek; | |
1544 | afinfo->seq_fops->release = seq_release_private; | |
1545 | ||
1546 | p = proc_net_fops_create(afinfo->name, S_IRUGO, afinfo->seq_fops); | |
1547 | if (p) | |
1548 | p->data = afinfo; | |
1549 | else | |
1550 | rc = -ENOMEM; | |
1551 | return rc; | |
1552 | } | |
1553 | ||
1554 | void udp_proc_unregister(struct udp_seq_afinfo *afinfo) | |
1555 | { | |
1556 | if (!afinfo) | |
1557 | return; | |
1558 | proc_net_remove(afinfo->name); | |
1559 | memset(afinfo->seq_fops, 0, sizeof(*afinfo->seq_fops)); | |
1560 | } | |
1561 | ||
1562 | /* ------------------------------------------------------------------------ */ | |
1563 | static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket) | |
1564 | { | |
1565 | struct inet_sock *inet = inet_sk(sp); | |
734ab87f AV |
1566 | __be32 dest = inet->daddr; |
1567 | __be32 src = inet->rcv_saddr; | |
1da177e4 LT |
1568 | __u16 destp = ntohs(inet->dport); |
1569 | __u16 srcp = ntohs(inet->sport); | |
1570 | ||
1571 | sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" | |
1572 | " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", | |
1573 | bucket, src, srcp, dest, destp, sp->sk_state, | |
1574 | atomic_read(&sp->sk_wmem_alloc), | |
1575 | atomic_read(&sp->sk_rmem_alloc), | |
1576 | 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), | |
1577 | atomic_read(&sp->sk_refcnt), sp); | |
1578 | } | |
1579 | ||
1580 | static int udp4_seq_show(struct seq_file *seq, void *v) | |
1581 | { | |
1582 | if (v == SEQ_START_TOKEN) | |
1583 | seq_printf(seq, "%-127s\n", | |
1584 | " sl local_address rem_address st tx_queue " | |
1585 | "rx_queue tr tm->when retrnsmt uid timeout " | |
1586 | "inode"); | |
1587 | else { | |
1588 | char tmpbuf[129]; | |
1589 | struct udp_iter_state *state = seq->private; | |
1590 | ||
1591 | udp4_format_sock(v, tmpbuf, state->bucket); | |
1592 | seq_printf(seq, "%-127s\n", tmpbuf); | |
1593 | } | |
1594 | return 0; | |
1595 | } | |
1596 | ||
1597 | /* ------------------------------------------------------------------------ */ | |
1598 | static struct file_operations udp4_seq_fops; | |
1599 | static struct udp_seq_afinfo udp4_seq_afinfo = { | |
1600 | .owner = THIS_MODULE, | |
1601 | .name = "udp", | |
1602 | .family = AF_INET, | |
1603 | .seq_show = udp4_seq_show, | |
1604 | .seq_fops = &udp4_seq_fops, | |
1605 | }; | |
1606 | ||
1607 | int __init udp4_proc_init(void) | |
1608 | { | |
1609 | return udp_proc_register(&udp4_seq_afinfo); | |
1610 | } | |
1611 | ||
1612 | void udp4_proc_exit(void) | |
1613 | { | |
1614 | udp_proc_unregister(&udp4_seq_afinfo); | |
1615 | } | |
1616 | #endif /* CONFIG_PROC_FS */ | |
1617 | ||
1618 | EXPORT_SYMBOL(udp_disconnect); | |
1619 | EXPORT_SYMBOL(udp_hash); | |
1620 | EXPORT_SYMBOL(udp_hash_lock); | |
1621 | EXPORT_SYMBOL(udp_ioctl); | |
25030a7f | 1622 | EXPORT_SYMBOL(udp_get_port); |
1da177e4 LT |
1623 | EXPORT_SYMBOL(udp_prot); |
1624 | EXPORT_SYMBOL(udp_sendmsg); | |
1625 | EXPORT_SYMBOL(udp_poll); | |
1626 | ||
1627 | #ifdef CONFIG_PROC_FS | |
1628 | EXPORT_SYMBOL(udp_proc_register); | |
1629 | EXPORT_SYMBOL(udp_proc_unregister); | |
1630 | #endif |