/*
 *      IPv6 fragment reassembly
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <[email protected]>
 *
 *      $Id: reassembly.c,v 1.26 2001/03/07 22:00:57 davem Exp $
 *
 *      Based on: net/ipv4/ip_fragment.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 *      Fixes:
 *      Andi Kleen              Make it work with multiple hosts.
 *                              More RFC compliance.
 *
 *      Horst von Brand         Add missing #include <linux/string.h>
 *      Alexey Kuznetsov        SMP races, threading, cleanup.
 *      Patrick McHardy         LRU queue of frag heads for evictor.
 *      Mitsuru KANDA @USAGI    Register inet6_protocol{}.
 *      David Stevens and
 *      YOSHIFUJI,H. @USAGI     Always remove fragment header to
 *                              calculate ICV correctly.
 */
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/jiffies.h>
#include <linux/net.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/inet_frag.h>

struct ip6frag_skb_cb
{
        struct inet6_skb_parm   h;
        int                     offset;
};

#define FRAG6_CB(skb)   ((struct ip6frag_skb_cb *)((skb)->cb))
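
/* Per-fragment reassembly state lives in the skb control block:
 * FRAG6_CB(skb)->offset caches the fragment's byte offset within the
 * original datagram, so the chain walk in ip6_frag_queue() never has to
 * re-parse the fragment header.
 */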

/*
 *      Equivalent of ipv4 struct ipq
 */

struct frag_queue
{
        struct inet_frag_queue  q;

        __be32                  id;             /* fragment id          */
        struct in6_addr         saddr;
        struct in6_addr         daddr;

        int                     iif;
        unsigned int            csum;
        __u16                   nhoffset;
};

static struct inet_frags ip6_frags;

int ip6_frag_nqueues(void)
{
        return ip6_frags.nqueues;
}

int ip6_frag_mem(void)
{
        return atomic_read(&ip6_frags.mem);
}

static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev);

/*
 * Callers should be careful not to use the hash value outside the
 * ipfrag_lock, as doing so could race with ipfrag_hash_rnd being
 * recalculated.
 */
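
/*
 * The mixing below folds the full 288-bit key (128-bit saddr, 128-bit
 * daddr, 32-bit fragment id) through jhash three 32-bit words at a time:
 * saddr words 0-2 first, then saddr word 3 with daddr words 0-1, then
 * daddr words 2-3 with the id. ip6_frags.rnd is the salt whose
 * recalculation the warning above refers to.
 */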
static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
                               struct in6_addr *daddr)
{
        u32 a, b, c;

        a = (__force u32)saddr->s6_addr32[0];
        b = (__force u32)saddr->s6_addr32[1];
        c = (__force u32)saddr->s6_addr32[2];

        a += JHASH_GOLDEN_RATIO;
        b += JHASH_GOLDEN_RATIO;
        c += ip6_frags.rnd;
        __jhash_mix(a, b, c);

        a += (__force u32)saddr->s6_addr32[3];
        b += (__force u32)daddr->s6_addr32[0];
        c += (__force u32)daddr->s6_addr32[1];
        __jhash_mix(a, b, c);

        a += (__force u32)daddr->s6_addr32[2];
        b += (__force u32)daddr->s6_addr32[3];
        c += (__force u32)id;
        __jhash_mix(a, b, c);

        return c & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip6_hashfn(struct inet_frag_queue *q)
{
        struct frag_queue *fq;

        fq = container_of(q, struct frag_queue, q);
        return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
}

int ip6_frag_match(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq;
        struct ip6_create_arg *arg = a;

        fq = container_of(q, struct frag_queue, q);
        return (fq->id == arg->id &&
                ipv6_addr_equal(&fq->saddr, arg->src) &&
                ipv6_addr_equal(&fq->daddr, arg->dst));
}
EXPORT_SYMBOL(ip6_frag_match);

/* Memory Tracking Functions. */
static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
{
        if (work)
                *work -= skb->truesize;
        atomic_sub(skb->truesize, &ip6_frags.mem);
        kfree_skb(skb);
}

void ip6_frag_init(struct inet_frag_queue *q, void *a)
{
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
        struct ip6_create_arg *arg = a;

        fq->id = arg->id;
        ipv6_addr_copy(&fq->saddr, arg->src);
        ipv6_addr_copy(&fq->daddr, arg->dst);
}
EXPORT_SYMBOL(ip6_frag_init);

/* Destruction primitives. */

static __inline__ void fq_put(struct frag_queue *fq)
{
        inet_frag_put(&fq->q, &ip6_frags);
}

/* Kill fq entry. It is not destroyed immediately, because the caller
 * (and possibly others) still hold references.
 */
static __inline__ void fq_kill(struct frag_queue *fq)
{
        inet_frag_kill(&fq->q, &ip6_frags);
}

static void ip6_evictor(struct inet6_dev *idev)
{
        int evicted;

        evicted = inet_frag_evictor(&ip6_frags);
        if (evicted)
                IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted);
}
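
/*
 * Eviction is LRU-driven: inet_frag_evictor() reclaims the oldest
 * queues, and each evicted queue is counted as a reassembly failure.
 * It is invoked from ipv6_frag_rcv() once ip6_frags.mem climbs above
 * the frags.high_thresh sysctl (see below).
 */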

static void ip6_frag_expire(unsigned long data)
{
        struct frag_queue *fq;
        struct net_device *dev = NULL;

        fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);

        spin_lock(&fq->q.lock);

        if (fq->q.last_in & COMPLETE)
                goto out;

        fq_kill(fq);

        dev = dev_get_by_index(&init_net, fq->iif);
        if (!dev)
                goto out;

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();

        /* Don't send error if the first segment did not arrive. */
        if (!(fq->q.last_in & FIRST_IN) || !fq->q.fragments)
                goto out;

        /*
         * Use the device on which the LAST ARRIVED segment was received
         * as the source device. Do not use the fq->dev pointer directly;
         * the device might have disappeared already.
         */
        fq->q.fragments->dev = dev;
        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
out:
        if (dev)
                dev_put(dev);
        spin_unlock(&fq->q.lock);
        fq_put(fq);
}

static __inline__ struct frag_queue *
fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
        struct inet6_dev *idev)
{
        struct inet_frag_queue *q;
        struct ip6_create_arg arg;
        unsigned int hash;

        arg.id = id;
        arg.src = src;
        arg.dst = dst;
        hash = ip6qhashfn(id, src, dst);

        q = inet_frag_find(&ip6_frags, &arg, hash);
        if (q == NULL)
                goto oom;

        return container_of(q, struct frag_queue, q);

oom:
        IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
        return NULL;
}
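
/*
 * fq_find() is find-or-create: inet_frag_find() looks up the bucket for
 * (id, src, dst) using ip6_frag_match() and, when no queue exists yet,
 * allocates a fresh one initialised by the ip6_frag_init() constructor
 * registered in ipv6_frag_init() below.
 */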

static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                          struct frag_hdr *fhdr, int nhoff)
{
        struct sk_buff *prev, *next;
        struct net_device *dev;
        int offset, end;

        if (fq->q.last_in & COMPLETE)
                goto err;

        offset = ntohs(fhdr->frag_off) & ~0x7;
        end = offset + (ntohs(ipv6_hdr(skb)->payload_len) -
                        ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
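        /*
         * Worked example: if ntohs(fhdr->frag_off) == 0x00b9, masking the
         * low three flag bits gives offset = 0xb8 = 184 bytes. The 13-bit
         * offset field counts 8-octet units (0xb9 >> 3 == 23, 23 * 8 == 184),
         * so the mask is equivalent to the multiply; bit 0 is the M (more
         * fragments) flag, set in this example. 'end' is then offset plus
         * the payload bytes that follow the fragment header in this packet.
         */
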
        if ((unsigned int)end > IPV6_MAXPLEN) {
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                 IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  ((u8 *)&fhdr->frag_off -
                                   skb_network_header(skb)));
                return -1;
        }

        if (skb->ip_summed == CHECKSUM_COMPLETE) {
                const unsigned char *nh = skb_network_header(skb);
                skb->csum = csum_sub(skb->csum,
                                     csum_partial(nh, (u8 *)(fhdr + 1) - nh,
                                                  0));
        }

        /* Is this the final fragment? */
        if (!(fhdr->frag_off & htons(IP6_MF))) {
                /* If we already have some bits beyond end
                 * or have a different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
                    ((fq->q.last_in & LAST_IN) && end != fq->q.len))
                        goto err;
                fq->q.last_in |= LAST_IN;
                fq->q.len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
                 * Required by the RFC.
                 */
                if (end & 0x7) {
                        /* RFC2460 says always send parameter problem in
                         * this case. -DaveM
                         */
                        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst),
                                         IPSTATS_MIB_INHDRERRORS);
                        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                          offsetof(struct ipv6hdr, payload_len));
                        return -1;
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
                        if (fq->q.last_in & LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
        }

        if (end == offset)
                goto err;

        /* Point into the IP datagram 'data' part. */
        if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data))
                goto err;

        if (pskb_trim_rcsum(skb, end - offset))
                goto err;

        /* Find out which fragments are in front and at the back of us
         * in the chain of fragments so far.  We must know where to put
         * this fragment, right?
         */
        prev = NULL;
        for (next = fq->q.fragments; next != NULL; next = next->next) {
                if (FRAG6_CB(next)->offset >= offset)
                        break;  /* bingo! */
                prev = next;
        }

        /* We found where to put this one.  Check for overlap with
         * the preceding fragment, and, if needed, align things so that
         * any overlaps are eliminated.
         */
        if (prev) {
                int i = (FRAG6_CB(prev)->offset + prev->len) - offset;

                if (i > 0) {
                        offset += i;
                        if (end <= offset)
                                goto err;
                        if (!pskb_pull(skb, i))
                                goto err;
                        if (skb->ip_summed != CHECKSUM_UNNECESSARY)
                                skb->ip_summed = CHECKSUM_NONE;
                }
        }
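
        /*
         * Example of the fix-up above: if prev covers bytes 0-183 (offset 0,
         * len 184) and this fragment claims offset 176, then i = 8, so eight
         * bytes are pulled off the head of skb and its offset is bumped to
         * 184, eliminating the overlap.
         */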

        /* Look for overlap with succeeding segments.
         * If we can merge fragments, do it.
         */
        while (next && FRAG6_CB(next)->offset < end) {
                int i = end - FRAG6_CB(next)->offset; /* overlap is 'i' bytes */

                if (i < next->len) {
                        /* Eat head of the next overlapped fragment
                         * and leave the loop. The next ones cannot overlap.
                         */
                        if (!pskb_pull(next, i))
                                goto err;
                        FRAG6_CB(next)->offset += i;    /* next fragment */
                        fq->q.meat -= i;
                        if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                next->ip_summed = CHECKSUM_NONE;
                        break;
                } else {
                        struct sk_buff *free_it = next;

                        /* Old fragment is completely overridden by
                         * the new one; drop it.
                         */
                        next = next->next;

                        if (prev)
                                prev->next = next;
                        else
                                fq->q.fragments = next;

                        fq->q.meat -= free_it->len;
                        frag_kfree_skb(free_it, NULL);
                }
        }

        FRAG6_CB(skb)->offset = offset;

        /* Insert this fragment in the chain of fragments. */
        skb->next = next;
        if (prev)
                prev->next = skb;
        else
                fq->q.fragments = skb;

        dev = skb->dev;
        if (dev) {
                fq->iif = dev->ifindex;
                skb->dev = NULL;
        }
        fq->q.stamp = skb->tstamp;
        fq->q.meat += skb->len;
        atomic_add(skb->truesize, &ip6_frags.mem);

        /* The first fragment.
         * nhoffset is obtained from the first fragment, of course.
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
                fq->q.last_in |= FIRST_IN;
        }

        if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
                return ip6_frag_reasm(fq, prev, dev);

        write_lock(&ip6_frags.lock);
        list_move_tail(&fq->q.lru_list, &ip6_frags.lru_list);
        write_unlock(&ip6_frags.lock);
        return -1;

err:
        IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

/*
 *      Check if this packet is complete.
 *
 *      It is called with a locked fq, and the caller must check that the
 *      queue is eligible for reassembly, i.e. it is not COMPLETE, the last
 *      and the first frames have arrived and all the bits are here.
 *
 *      Returns 1 on success and -1 on failure for any reason. On success
 *      the reassembled datagram lives in the skb that was just received
 *      (see the head swap below), with IP6CB()->nhoff pointing at the
 *      current nexthdr field.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                          struct net_device *dev)
{
        struct sk_buff *fp, *head = fq->q.fragments;
        int    payload_len;
        unsigned int nhoff;

        fq_kill(fq);

        /* Make the one we just received the head. */
        if (prev) {
                head = prev->next;
                fp = skb_clone(head, GFP_ATOMIC);

                if (!fp)
                        goto out_oom;

                fp->next = head->next;
                prev->next = fp;

                skb_morph(head, fq->q.fragments);
                head->next = fq->q.fragments->next;

                kfree_skb(fq->q.fragments);
                fq->q.fragments = head;
        }
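
        /*
         * The block above makes the freshly arrived skb the head of the
         * chain: the current head is cloned into its old slot, and
         * skb_morph() lets the new skb take over the old head's identity
         * while keeping its own data. The reassembled datagram therefore
         * ends up in the very skb the caller is processing.
         */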

        BUG_TRAP(head != NULL);
        BUG_TRAP(FRAG6_CB(head)->offset == 0);

        /* Unfragmented part is taken from the first segment. */
        payload_len = ((head->data - skb_network_header(head)) -
                       sizeof(struct ipv6hdr) + fq->q.len -
                       sizeof(struct frag_hdr));
        if (payload_len > IPV6_MAXPLEN)
                goto out_oversize;

        /* Head of list must not be cloned. */
        if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
                goto out_oom;

        /* If the first fragment is fragmented itself, we split
         * it into two chunks: the first with data and paged part
         * and the second, holding only fragments. */
        if (skb_shinfo(head)->frag_list) {
                struct sk_buff *clone;
                int i, plen = 0;

                if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
                        goto out_oom;
                clone->next = head->next;
                head->next = clone;
                skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
                skb_shinfo(head)->frag_list = NULL;
                for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
                        plen += skb_shinfo(head)->frags[i].size;
                clone->len = clone->data_len = head->data_len - plen;
                head->data_len -= clone->len;
                head->len -= clone->len;
                clone->csum = 0;
                clone->ip_summed = head->ip_summed;
                atomic_add(clone->truesize, &ip6_frags.mem);
        }

        /* We have to remove the fragment header from the datagram and to
         * relocate the header in order to calculate the ICV correctly. */
        nhoff = fq->nhoffset;
        skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
        memmove(head->head + sizeof(struct frag_hdr), head->head,
                (head->data - head->head) - sizeof(struct frag_hdr));
        head->mac_header += sizeof(struct frag_hdr);
        head->network_header += sizeof(struct frag_hdr);

        skb_shinfo(head)->frag_list = head->next;
        skb_reset_transport_header(head);
        skb_push(head, head->data - skb_network_header(head));
        atomic_sub(head->truesize, &ip6_frags.mem);

        for (fp = head->next; fp; fp = fp->next) {
                head->data_len += fp->len;
                head->len += fp->len;
                if (head->ip_summed != fp->ip_summed)
                        head->ip_summed = CHECKSUM_NONE;
                else if (head->ip_summed == CHECKSUM_COMPLETE)
                        head->csum = csum_add(head->csum, fp->csum);
                head->truesize += fp->truesize;
                atomic_sub(fp->truesize, &ip6_frags.mem);
        }

        head->next = NULL;
        head->dev = dev;
        head->tstamp = fq->q.stamp;
        ipv6_hdr(head)->payload_len = htons(payload_len);
        IP6CB(head)->nhoff = nhoff;

        /* Yes, and fold redundant checksum back. 8) */
        if (head->ip_summed == CHECKSUM_COMPLETE)
                head->csum = csum_partial(skb_network_header(head),
                                          skb_network_header_len(head),
                                          head->csum);

        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
        rcu_read_unlock();
        fq->q.fragments = NULL;
        return 1;

out_oversize:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
        goto out_fail;
out_oom:
        if (net_ratelimit())
                printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
        rcu_read_lock();
        IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
        rcu_read_unlock();
        return -1;
}

static int ipv6_frag_rcv(struct sk_buff *skb)
{
        struct frag_hdr *fhdr;
        struct frag_queue *fq;
        struct ipv6hdr *hdr = ipv6_hdr(skb);

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMREQDS);

        /* Jumbo payload inhibits frag. header */
        if (hdr->payload_len == 0) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  skb_network_header_len(skb));
                return -1;
        }
        if (!pskb_may_pull(skb, (skb_transport_offset(skb) +
                                 sizeof(struct frag_hdr)))) {
                IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS);
                icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                  skb_network_header_len(skb));
                return -1;
        }

        hdr = ipv6_hdr(skb);
        fhdr = (struct frag_hdr *)skb_transport_header(skb);

        if (!(fhdr->frag_off & htons(0xFFF9))) {
                /* It is not a fragmented frame */
                skb->transport_header += sizeof(struct frag_hdr);
                IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS);

                IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
                return 1;
        }

        if (atomic_read(&ip6_frags.mem) > init_net.ipv6.sysctl.frags.high_thresh)
                ip6_evictor(ip6_dst_idev(skb->dst));

        if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
                          ip6_dst_idev(skb->dst))) != NULL) {
                int ret;

                spin_lock(&fq->q.lock);

                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);

                spin_unlock(&fq->q.lock);
                fq_put(fq);
                return ret;
        }

        IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
}

static struct inet6_protocol frag_protocol =
{
        .handler        =       ipv6_frag_rcv,
        .flags          =       INET6_PROTO_NOPOLICY,
};
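
/*
 * frag_protocol hooks ipv6_frag_rcv() into the IPv6 input path for
 * next header 44 (IPPROTO_FRAGMENT); the registration happens in
 * ipv6_frag_init() below. A positive return value from the handler asks
 * the IPv6 input code to resubmit the skb and continue parsing at the
 * updated IP6CB nhoff, which is how both the not-actually-fragmented
 * shortcut and a completed reassembly hand the datagram onward.
 */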

void ipv6_frag_sysctl_init(struct net *net)
{
        ip6_frags.ctl = &net->ipv6.sysctl.frags;
}

int __init ipv6_frag_init(void)
{
        int ret;

        ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
        if (ret)
                goto out;

        ip6_frags.hashfn = ip6_hashfn;
        ip6_frags.constructor = ip6_frag_init;
        ip6_frags.destructor = NULL;
        ip6_frags.skb_free = NULL;
        ip6_frags.qsize = sizeof(struct frag_queue);
        ip6_frags.match = ip6_frag_match;
        ip6_frags.frag_expire = ip6_frag_expire;
        inet_frags_init(&ip6_frags);
out:
        return ret;
}

void ipv6_frag_exit(void)
{
        inet_frags_fini(&ip6_frags);
        inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
}