/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <[email protected]>
 *		Alan Cox <[email protected]>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;

struct ipfrag_skb_cb {
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

/* RFC 3168 support:
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

static inline u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}
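
/* Worked example: INET_ECN_MASK is 3, so only the two ECN bits of tos
 * matter here. A fragment carrying ECT(0) (ECN bits 10, value 2) maps to
 * 1 << 2 == IPFRAG_ECN_ECT_0, while a not-ECT fragment (bits 00) maps to
 * IPFRAG_ECN_NOT_ECT (0x01).
 */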

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements.
 * Value: 0xff if frame should be dropped.
 *        0 or INET_ECN_CE value, to be ORed in to final iph->tos field.
 */
static const u8 ip4_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};

static struct inet_frags ip4_frags;

int ip_frag_nqueues(struct net *net)
{
	return net->ipv4.frags.nqueues;
}

int ip_frag_mem(struct net *net)
{
	return atomic_read(&net->ipv4.frags.mem);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

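/* The hash key is the RFC 791 reassembly tuple {id, protocol, saddr,
 * daddr}, mixed with the random seed ip4_frags.rnd (renewed every
 * ipfrag_secret_interval) so that off-path senders cannot steer all of
 * their fragment queues into a single hash bucket.
 */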
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
}

static unsigned int ip4_hashfn(struct inet_frag_queue *q)
{
	struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

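/* Note that "user" is part of the lookup key in addition to the RFC 791
 * tuple: contexts that defragment independently (e.g. conntrack via
 * IP_DEFRAG_CONNTRACK_IN vs. local delivery) must never share a queue.
 */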
static int ip4_frag_match(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp;
	struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user;
}

/* Memory Tracking Functions. */
static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb)
{
	atomic_sub(skb->truesize, &nf->mem);
	kfree_skb(skb);
}

static void ip4_frag_init(struct inet_frag_queue *q, void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
}

static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because the caller (and possibly others) still holds a reference.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments. The evictor trashes the oldest
 * fragment queues until we are back under the threshold.
 */
static void ip_evictor(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
	if (evicted)
		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out. Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/* skb dst is stale, drop it, and perform route lookup again */
		skb_dst_drop(head);
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out_rcu_unlock;

		/*
		 * Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (qp->user == IP_DEFRAG_AF_PACKET ||
		    (qp->user == IP_DEFRAG_CONNTRACK_IN &&
		     skb_rtable(head)->rt_type != RTN_LOCAL))
			goto out_rcu_unlock;

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create a new one if nothing is found.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (q == NULL)
		goto out_nomem;

	return container_of(q, struct ipq, q);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n"));
	return NULL;
}

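/* Per-peer anti-DoS heuristic: every fragment from a given source bumps
 * that peer's generation counter (peer->rid), and each queue remembers
 * the value it last saw (qp->rid). With the default max_dist of 64, a
 * queue that sees more than 64 intervening fragments from the same
 * source between two of its own fragments is presumed dead and dropped.
 */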
/* Is the fragment too far ahead to be part of ipq? */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(qp->q.net, fp);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
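	/* Example: a raw frag_off of 0x2003 leaves IP_MF set in flags and
	 * an offset field of 3, i.e. this fragment's payload begins at
	 * byte 3 * 8 = 24 of the original datagram.
	 */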
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
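	/* For instance, if the preceding fragment covers bytes [0, 24) and
	 * this one claims offset 16, then i == 8 below: we advance our
	 * offset to 24 and pull the first 8 bytes off this skb.
	 */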
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden by
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(qp->q.net, free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	atomic_add(skb->truesize, &qp->q.net->mem);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}

/* Build a new IP datagram from all its fragments. */

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip4_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
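	/* E.g. a datagram that mixed a not-ECT fragment with a CE fragment
	 * accumulated IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE (0x09) in qp->ecn;
	 * the table maps that to 0xff, so the reassembly is discarded as
	 * required by RFC 3168 5.3.
	 */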
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it into two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &qp->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR pr_fmt("queue_glue: no memory for gluing queue %p\n"),
		       qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		pr_info("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
		ip_evictor(net);

	/* Lookup (or create) queue header */
	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

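/* Helper for callers that may meet IPv4 fragments before the normal IP
 * input path runs: it sanity-checks the header and, for fragments, feeds
 * the skb to ip_defrag(). Per ip_defrag()'s convention, a non-zero return
 * (e.g. -EINPROGRESS while the datagram is still incomplete) means the
 * skb was consumed, hence the NULL return; on 0 the skb now carries the
 * fully reassembled datagram.
 */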
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
	const struct iphdr *iph;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		return skb;

	iph = ip_hdr(skb);
	if (iph->ihl < 5 || iph->version != 4)
		return skb;
	if (!pskb_may_pull(skb, iph->ihl*4))
		return skb;
	iph = ip_hdr(skb);
	len = ntohs(iph->tot_len);
	if (skb->len < len || len < (iph->ihl * 4))
		return skb;

	if (ip_is_fragment(ip_hdr(skb))) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (pskb_trim_rcsum(skb, len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, user))
				return NULL;
			skb->rxhash = 0;
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
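
/* Both tables surface under /proc/sys/net/ipv4/. The first is per network
 * namespace (ip4_frags_ns_ctl_register() below repoints its .data fields
 * at the per-netns values), while ipfrag_secret_interval and
 * ipfrag_max_dist are registered once, globally, via
 * register_net_sysctl_rotable().
 */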

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[2].data = &net->ipv4.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void ip4_frags_ctl_register(void)
{
	register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	/*
	 * Fragment cache limits. We will commit 256K at one time. Should we
	 * cross that limit we will prune down to 192K. This should cope with
	 * even the most extreme cases without allowing an attacker to
	 * measurably harm machine performance.
	 */
	net->ipv4.frags.high_thresh = 256 * 1024;
	net->ipv4.frags.low_thresh = 192 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC791 is wrong in proposing to prolong the timer by TTL on each
	 * fragment arrival.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip4_frags);
}
1da177e4 | 873 | } |