/*
 *	IPv6 fragment reassembly
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<[email protected]>
 *
 *	Based on: net/ipv4/ip_fragment.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	Fixes:
 *	Andi Kleen		Make it work with multiple hosts.
 *				More RFC compliance.
 *
 *	Horst von Brand		Add missing #include <linux/string.h>
 *	Alexey Kuznetsov	SMP races, threading, cleanup.
 *	Patrick McHardy		LRU queue of frag heads for evictor.
 *	Mitsuru KANDA @USAGI	Register inet6_protocol{}.
 *	David Stevens and
 *	YOSHIFUJI,H. @USAGI	Always remove fragment header to
 *				calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

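/*
 *	Per-fragment state kept in skb->cb while a fragment sits on a
 *	reassembly queue: the usual inet6 control block plus this fragment's
 *	byte offset within the original datagram.  It is accessed through
 *	the FRAG6_CB() macro below.
 */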
struct ip6frag_skb_cb
{
	struct inet6_skb_parm	h;
	int			offset;
};

#define FRAG6_CB(skb)	((struct ip6frag_skb_cb*)((skb)->cb))


/*
 *	Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
	struct inet_frag_queue	q;

	__be32			id;		/* fragment id		*/
	u32			user;
	struct in6_addr		saddr;
	struct in6_addr		daddr;

	int			iif;
	unsigned int		csum;
	__u16			nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(struct net *net)
{
	return net->ipv6.frags.nqueues;
}

int ip6_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv6.frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev);

/*
 * callers should be careful not to use the hash value outside the ipfrag_lock
 * as doing so could race with ipfrag_hash_rnd being recalculated.
 */
unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
			     const struct in6_addr *daddr, u32 rnd)
{
	u32 c;

	c = jhash_3words((__force u32)saddr->s6_addr32[0],
			 (__force u32)saddr->s6_addr32[1],
			 (__force u32)saddr->s6_addr32[2],
			 rnd);

	c = jhash_3words((__force u32)saddr->s6_addr32[3],
			 (__force u32)daddr->s6_addr32[0],
			 (__force u32)daddr->s6_addr32[1],
			 c);

	c = jhash_3words((__force u32)daddr->s6_addr32[2],
			 (__force u32)daddr->s6_addr32[3],
			 (__force u32)id,
			 c);

	return c & (INETFRAGS_HASHSZ - 1);
}
EXPORT_SYMBOL_GPL(inet6_hash_frag);

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
	struct frag_queue *fq;

	fq = container_of(q, struct frag_queue, q);
	return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr, ip6_frags.rnd);
}

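/*
 *	A reassembly queue is keyed by the fragment id, the defragmentation
 *	user (IP6_DEFRAG_LOCAL_DELIVER for the local delivery path, see
 *	fq_find()) and the source and destination addresses.  ip6_frag_match()
 *	compares a queue against such a key; ip6_frag_init() fills a newly
 *	allocated queue from it.
 */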
int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq;
	struct ip6_create_arg *arg = a;

	fq = container_of(q, struct frag_queue, q);
	return (fq->id == arg->id && fq->user == arg->user &&
			ipv6_addr_equal(&fq->saddr, arg->src) &&
			ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
	struct frag_queue *fq = container_of(q, struct frag_queue, q);
	struct ip6_create_arg *arg = a;

	fq->id = arg->id;
	fq->user = arg->user;
	ipv6_addr_copy(&fq->saddr, arg->src);
	ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
	inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
	inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct net *net, struct inet6_dev *idev)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags);
	if (evicted)
		IP6_ADD_STATS_BH(net, idev, IPSTATS_MIB_REASMFAILS, evicted);
}

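/*
 *	Timer callback for a reassembly queue that did not complete in time.
 *	The queue is killed and, provided the first fragment has arrived, an
 *	ICMPv6 "fragment reassembly time exceeded" error is sent back on the
 *	device that received the last fragment.
 */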
static void ip6_frag_expire(unsigned long data)
{
	struct frag_queue *fq;
	struct net_device *dev = NULL;
	struct net *net;

	fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	fq_kill(fq);

	net = container_of(fq->q.net, struct net, ipv6.frags);
	rcu_read_lock();
	dev = dev_get_by_index_rcu(net, fq->iif);
	if (!dev)
		goto out_rcu_unlock;

	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);

	/* Don't send error if the first segment did not arrive. */
	if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
		goto out_rcu_unlock;

	/* But use as the source the device on which the LAST ARRIVED
	 * segment was received.  Do not use the fq->dev pointer directly;
	 * the device might have disappeared already.
	 */
	fq->q.fragments->dev = dev;
	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
out_rcu_unlock:
	rcu_read_unlock();
out:
	spin_unlock(&fq->q.lock);
	fq_put(fq);
}

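/*
 *	Find the reassembly queue for (id, src, dst) in this namespace,
 *	creating it if it does not exist yet.  Returns NULL on allocation
 *	failure; otherwise the caller holds a reference that must be dropped
 *	with fq_put().
 */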
static __inline__ struct frag_queue *
fq_find(struct net *net, __be32 id, struct in6_addr *src, struct in6_addr *dst)
{
	struct inet_frag_queue *q;
	struct ip6_create_arg arg;
	unsigned int hash;

	arg.id = id;
	arg.user = IP6_DEFRAG_LOCAL_DELIVER;
	arg.src = src;
	arg.dst = dst;

	read_lock(&ip6_frags.lock);
	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);

	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
	if (q == NULL)
		return NULL;

	return container_of(q, struct frag_queue, q);
}

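/*
 *	Queue one fragment for reassembly.  The fragment's offset and end are
 *	derived from the fragment header and the IPv6 payload length and are
 *	sanity checked (maximum size, 8-byte alignment of non-final
 *	fragments), then the skb is inserted into the offset-ordered fragment
 *	list.  Per RFC 5722, any overlap with an already queued fragment
 *	discards the whole queue.  Once the first and last fragments have
 *	arrived and the accumulated data covers the full length, reassembly
 *	is finished via ip6_frag_reasm().
 */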
static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
			  struct frag_hdr *fhdr, int nhoff)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int offset, end;
	struct net *net = dev_net(skb_dst(skb)->dev);

	if (fq->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	offset = ntohs(fhdr->frag_off) & ~0x7;
	end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));

	if ((unsigned int)end > IPV6_MAXPLEN) {
		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
				 IPSTATS_MIB_INHDRERRORS);
		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
				  ((u8 *)&fhdr->frag_off -
				   skb_network_header(skb)));
		return -1;
	}

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		const unsigned char *nh = skb_network_header(skb);
		skb->csum = csum_sub(skb->csum,
				     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
						  0));
	}

	/* Is this the final fragment? */
	if (!(fhdr->frag_off & htons(IP6_MF))) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < fq->q.len ||
		    ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
			goto err;
		fq->q.last_in |= INET_FRAG_LAST_IN;
		fq->q.len = end;
	} else {
		/* Check if the fragment is rounded to 8 bytes.
		 * Required by the RFC.
		 */
		if (end & 0x7) {
			/* RFC2460 says always send parameter problem in
			 * this case. -DaveM
			 */
			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
					 IPSTATS_MIB_INHDRERRORS);
			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
					  offsetof(struct ipv6hdr, payload_len));
			return -1;
		}
		if (end > fq->q.len) {
			/* Some bits beyond end -> corruption. */
			if (fq->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			fq->q.len = end;
		}
	}

	if (end == offset)
		goto err;

	/* Point into the IP datagram 'data' part. */
	if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
		goto err;

	if (pskb_trim_rcsum(skb, end - offset))
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = fq->q.fragments_tail;
	if (!prev || FRAG6_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = fq->q.fragments; next != NULL; next = next->next) {
		if (FRAG6_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* RFC 5722, Section 4: When reassembling an IPv6 datagram, if one
	 * or more of its constituent fragments is determined to be an
	 * overlapping fragment, the entire datagram (and any constituent
	 * fragments, including those not yet received) MUST be silently
	 * discarded.
	 */

	/* Check for overlap with preceding fragment. */
	if (prev &&
	    (FRAG6_CB(prev)->offset + prev->len) > offset)
		goto discard_fq;

	/* Look for overlap with succeeding segment. */
	if (next && FRAG6_CB(next)->offset < end)
		goto discard_fq;

	FRAG6_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		fq->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		fq->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		fq->iif = dev->ifindex;
		skb->dev = NULL;
	}
	fq->q.stamp = skb->tstamp;
	fq->q.meat += skb->len;
	atomic_add(skb->truesize, &fq->q.net->mem);

	/* The first fragment.
	 * nhoffset is obtained from the first fragment, of course.
	 */
	if (offset == 0) {
		fq->nhoffset = nhoff;
		fq->q.last_in |= INET_FRAG_FIRST_IN;
	}

	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    fq->q.meat == fq->q.len)
		return ip6_frag_reasm(fq, prev, dev);

	write_lock(&ip6_frags.lock);
	list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list);
	write_unlock(&ip6_frags.lock);
	return -1;

discard_fq:
	fq_kill(fq);
err:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;
}

/*
 *	Check if this packet is complete.
 *	Returns -1 on failure for any reason, and 1 when the datagram has
 *	been successfully reassembled (IP6CB(head)->nhoff then holds the
 *	offset of the current nexthdr field in the reassembled frame).
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}

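/*
 *	Protocol handler for the IPv6 fragment header (IPPROTO_FRAGMENT).
 *	A datagram whose fragment header carries a zero offset and no M flag
 *	is passed on unchanged; otherwise the skb is queued on the matching
 *	reassembly queue, with the evictor run first if the per-namespace
 *	memory threshold has been exceeded.
 */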
static int ipv6_frag_rcv(struct sk_buff *skb)
{
	struct frag_hdr *fhdr;
	struct frag_queue *fq;
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct net *net = dev_net(skb_dst(skb)->dev);

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);

	/* Jumbo payload inhibits frag. header */
	if (hdr->payload_len == 0)
		goto fail_hdr;

	if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
				 sizeof(struct frag_hdr))))
		goto fail_hdr;

	hdr = ipv6_hdr(skb);
	fhdr = (struct frag_hdr *)skb_transport_header(skb);

	if (!(fhdr->frag_off & htons(0xFFF9))) {
		/* It is not a fragmented frame */
		skb->transport_header += sizeof(struct frag_hdr);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);

		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
		return 1;
	}

	if (atomic_read(&net->ipv6.frags.mem) > net->ipv6.frags.high_thresh)
		ip6_evictor(net, ip6_dst_idev(skb_dst(skb)));

	fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr);
	if (fq != NULL) {
		int ret;

		spin_lock(&fq->q.lock);

		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

		spin_unlock(&fq->q.lock);
		fq_put(fq);
		return ret;
	}

	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -1;

fail_hdr:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
	return -1;
}

static const struct inet6_protocol frag_protocol =
{
	.handler	=	ipv6_frag_rcv,
	.flags		=	INET6_PROTO_NOPOLICY,
};

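/*
 *	Sysctl knobs: ip6frag_high_thresh, ip6frag_low_thresh and ip6frag_time
 *	are per network namespace; ip6frag_secret_interval, the interval at
 *	which the hash secret is rebuilt, is global to the ip6_frags table.
 */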
#ifdef CONFIG_SYSCTL
static struct ctl_table ip6_frags_ns_ctl_table[] = {
	{
		.procname	= "ip6frag_high_thresh",
		.data		= &init_net.ipv6.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_low_thresh",
		.data		= &init_net.ipv6.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ip6frag_time",
		.data		= &init_net.ipv6.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip6_frags_ctl_table[] = {
	{
		.procname	= "ip6frag_secret_interval",
		.data		= &ip6_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip6_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv6.frags.high_thresh;
		table[1].data = &net->ipv6.frags.low_thresh;
		table[2].data = &net->ipv6.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv6_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv6.sysctl.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip6_frags_ns_sysctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv6.sysctl.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv6.sysctl.frags_hdr);
	if (!net_eq(net, &init_net))
		kfree(table);
}

static struct ctl_table_header *ip6_ctl_header;

static int ip6_frags_sysctl_register(void)
{
	ip6_ctl_header = register_net_sysctl_rotable(net_ipv6_ctl_path,
						     ip6_frags_ctl_table);
	return ip6_ctl_header == NULL ? -ENOMEM : 0;
}

static void ip6_frags_sysctl_unregister(void)
{
	unregister_net_sysctl_table(ip6_ctl_header);
}
#else
static inline int ip6_frags_ns_sysctl_register(struct net *net)
{
	return 0;
}

static inline void ip6_frags_ns_sysctl_unregister(struct net *net)
{
}

static inline int ip6_frags_sysctl_register(void)
{
	return 0;
}

static inline void ip6_frags_sysctl_unregister(void)
{
}
#endif

static int __net_init ipv6_frags_init_net(struct net *net)
{
	net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
	net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
	net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;

	inet_frags_init_net(&net->ipv6.frags);

	return ip6_frags_ns_sysctl_register(net);
}

static void __net_exit ipv6_frags_exit_net(struct net *net)
{
	ip6_frags_ns_sysctl_unregister(net);
	inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
}

static struct pernet_operations ip6_frags_ops = {
	.init = ipv6_frags_init_net,
	.exit = ipv6_frags_exit_net,
};

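/*
 *	Module init: register the fragment protocol handler, the global and
 *	per-namespace sysctls and the pernet operations, then describe the
 *	IPv6 queues to the generic inet_frag code through ip6_frags.
 */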
int __init ipv6_frag_init(void)
{
	int ret;

	ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	if (ret)
		goto out;

	ret = ip6_frags_sysctl_register();
	if (ret)
		goto err_sysctl;

	ret = register_pernet_subsys(&ip6_frags_ops);
	if (ret)
		goto err_pernet;

	ip6_frags.hashfn = ip6_hashfn;
	ip6_frags.constructor = ip6_frag_init;
	ip6_frags.destructor = NULL;
	ip6_frags.skb_free = NULL;
	ip6_frags.qsize = sizeof(struct frag_queue);
	ip6_frags.match = ip6_frag_match;
	ip6_frags.frag_expire = ip6_frag_expire;
	ip6_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip6_frags);
out:
	return ret;

err_pernet:
	ip6_frags_sysctl_unregister();
err_sysctl:
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
	goto out;
}

void ipv6_frag_exit(void)
{
	inet_frags_fini(&ip6_frags);
	ip6_frags_sysctl_unregister();
	unregister_pernet_subsys(&ip6_frags_ops);
	inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}