/*
 *	INET		An implementation of the TCP/IP protocol suite for the LINUX
 *			operating system.  INET is implemented using the BSD Socket
 *			interface as the means of communication with the user level.
 *
 *		The IP fragmentation functionality.
 *
 * Authors:	Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *		Alan Cox	:	Split from ip.c , see ip_input.c for history.
 *		David S. Miller :	Begin massive cleanup...
 *		Andi Kleen	:	Add sysctls.
 *		xxxx		:	Overlapfrag bug.
 *		Ultima		:	ip_expire() kernel panic.
 *		Bill Hawes	:	Frag accounting and evictor fixes.
 *		John McDonald	:	0 length frag bug.
 *		Alexey Kuznetsov:	SMP races, threading, cleanup.
 *		Patrick McHardy :	LRU queue of frag heads for evictor.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static int sysctl_ipfrag_max_dist __read_mostly = 64;
static const char ip_frag_cache_name[] = "ip4-frags";

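/* Per-fragment reassembly state, overlaid on skb->cb via FRAG_CB():
 * 'offset' records where this fragment's payload starts within the
 * original datagram.  struct inet_skb_parm is kept as the first member
 * so IPCB(skb) remains usable while the fragment sits in a queue.
 */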
struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))

/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	int		iif;
	unsigned int	rid;
	struct inet_peer *peer;
};

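/* Map the two ECN bits of the TOS byte to a one-hot value (1, 2, 4 or 8).
 * ip_frag_queue() ORs these into qp->ecn, so at reassembly time the queue
 * knows exactly which ECN codepoints were seen across all fragments and
 * can reject incoherent combinations (the "RFC3168 support" above).
 */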
static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

static struct inet_frags ip4_frags;

int ip_frag_mem(struct net *net)
{
	return sum_frag_mem_limit(&net->ipv4.frags);
}

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
};

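/* Hash the (id, protocol, saddr, daddr) fragment key.  The random seed
 * is initialized lazily, on first use, via net_get_random_once(), so an
 * attacker cannot precompute colliding fragment keys from boot-time
 * state.
 */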
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd);
}

static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
{
	const struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

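/* Queues match on the classic (id, saddr, daddr, protocol) tuple plus
 * the defragmentation "user", so that e.g. conntrack and local delivery
 * never share (or corrupt) one another's reassembly state.
 */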
static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct ipq *qp;
	const struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user;
}

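/* Constructor invoked by inet_frag_find() for a newly allocated queue:
 * copy the match keys out of the IP header and, when ipfrag_max_dist is
 * enabled, take a reference on the source address's inet_peer entry so
 * fragment "distance" can be tracked per peer (see ip_frag_too_far()).
 */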
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->user = arg->user;
	qp->peer = sysctl_ipfrag_max_dist ?
		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, 1) : NULL;
}

static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}


/* Destruction primitives. */

static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	if (!(qp->q.flags & INET_FRAG_EVICTED)) {
		struct sk_buff *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);

		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
			goto out;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/* skb has no dst, perform route lookup again */
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out_rcu_unlock;

		/* Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (qp->user == IP_DEFRAG_AF_PACKET ||
		    ((qp->user >= IP_DEFRAG_CONNTRACK_IN) &&
		     (qp->user <= __IP_DEFRAG_CONNTRACK_IN_END) &&
		     (skb_rtable(head)->rt_type != RTN_LOCAL)))
			goto out_rcu_unlock;

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

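/* The new fragment arrived too far (in IP ID space) from the previous
 * one, so everything queued so far is suspect: drop all queued
 * fragments and restart the queue from scratch.  Returns -ETIMEDOUT if
 * the queue's timer had already fired and the queue is on its way out.
 */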
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(&qp->q, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

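/* Return convention: -EINPROGRESS while more fragments are still
 * expected, ip_frag_reasm()'s result once the first and last fragments
 * are present and the byte counts add up, and a negative errno when
 * this fragment is dropped.
 */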
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (!pskb_pull(skb, ihl))
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * the new one; drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			sub_frag_mem_limit(&qp->q, free_it->truesize);
			kfree_skb(free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(&qp->q, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    skb->len + ihl > qp->q.max_size)
		qp->q.max_size = skb->len + ihl;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments. */

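/* Outline: promote the fragment that just arrived to the head of the
 * list (the skb_morph() dance below), check that the total length fits
 * in 16 bits, unclone the head, then fold every following fragment into
 * it - coalescing where skb_try_coalesce() allows, otherwise chaining
 * skbs onto frag_list - and finally patch tot_len, frag_off and the ECN
 * bits of the IP header in place.
 */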
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	int sum_truesize;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(!head);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(&qp->q, clone->truesize);
	}

	skb_push(head, head->data - skb_network_header(head));

	sum_truesize = head->truesize;
	for (fp = head->next; fp;) {
		bool headstolen;
		int delta;
		struct sk_buff *next = fp->next;

		sum_truesize += fp->truesize;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);

		if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
			kfree_skb_partial(fp, headstolen);
		} else {
			if (!skb_shinfo(head)->frag_list)
				skb_shinfo(head)->frag_list = fp;
			head->data_len += fp->len;
			head->len += fp->len;
			head->truesize += fp->truesize;
		}
		fp = next;
	}
	sub_frag_mem_limit(&qp->q, sum_truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = qp->q.max_size;

	iph = ip_hdr(head);
	/* max_size != 0 implies at least one fragment had IP_DF set */
	iph->frag_off = qp->q.max_size ? htons(IP_DF) : 0;
	iph->tot_len = htons(len);
	iph->tos |= ecn;
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment. */
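/* Returns -EINPROGRESS when the fragment was swallowed into a queue
 * (the caller must forget the skb), 0 when this fragment completed the
 * datagram and skb now holds the reassembled packet, or a negative
 * errno after the skb has been freed.  A typical caller, sketched
 * after ip_local_deliver():
 *
 *	if (ip_is_fragment(ip_hdr(skb))) {
 *		if (ip_defrag(skb, IP_DEFRAG_LOCAL_DELIVER))
 *			return 0;	// queued or dropped
 *	}
 *	// skb is now the complete datagram
 */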
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Lookup (or create) queue header */
	qp = ip_find(net, ip_hdr(skb), user);
	if (qp) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}
EXPORT_SYMBOL(ip_defrag);

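/* Defragmentation entry point for callers that may hold a shared skb
 * (e.g. packet taps): the skb is share-checked and trimmed before being
 * handed to ip_defrag().  Returns NULL once a fragment has been queued,
 * the reassembled skb when this was the final fragment, or the original
 * skb untouched when it is not an IPv4 fragment at all.
 */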
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
	struct iphdr iph;
	int netoff;
	u32 len;

	if (skb->protocol != htons(ETH_P_IP))
		return skb;

	netoff = skb_network_offset(skb);

	if (skb_copy_bits(skb, netoff, &iph, sizeof(iph)) < 0)
		return skb;

	if (iph.ihl < 5 || iph.version != 4)
		return skb;

	len = ntohs(iph.tot_len);
	if (skb->len < netoff + len || len < (iph.ihl * 4))
		return skb;

	if (ip_is_fragment(&iph)) {
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (skb) {
			if (!pskb_may_pull(skb, netoff + iph.ihl * 4))
				return skb;
			if (pskb_trim_rcsum(skb, netoff + len))
				return skb;
			memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
			if (ip_defrag(skb, user))
				return NULL;
			skb_clear_hash(skb);
		}
	}
	return skb;
}
EXPORT_SYMBOL(ip_check_defrag);

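/* Two sysctl tables: ip4_frags_ns_ctl_table holds the per-namespace
 * knobs (thresholds, timeout) and is registered once per netns, while
 * ip4_frags_ctl_table carries the remaining global knobs and is
 * registered for the initial namespace only.
 */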
#ifdef CONFIG_SYSCTL
static int zero;

static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &init_net.ipv4.frags.low_thresh
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
		.extra2		= &init_net.ipv4.frags.high_thresh
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* secret interval has been deprecated */
static int ip4_frags_secret_interval_unused;
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags_secret_interval_unused,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};

static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (!table)
			goto err_alloc;

		table[0].data = &net->ipv4.frags.high_thresh;
		table[0].extra1 = &net->ipv4.frags.low_thresh;
		table[0].extra2 = &init_net.ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[1].extra2 = &net->ipv4.frags.high_thresh;
		table[2].data = &net->ipv4.frags.timeout;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

static void __init ip4_frags_ctl_register(void)
{
	register_net_sysctl(&init_net, "net/ipv4", ip4_frags_ctl_table);
}
#else
static int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static void __init ip4_frags_ctl_register(void)
{
}
#endif

static int __net_init ipv4_frags_init_net(struct net *net)
{
	/* Fragment cache limits.
	 *
	 * The fragment memory accounting code (tries to) account for
	 * the real memory usage, by measuring both the size of frag
	 * queue struct (inet_frag_queue (ipv4:ipq/ipv6:frag_queue))
	 * and the SKB's truesize.
	 *
	 * A 64K fragmented datagram consumes 129736 bytes (44*2944)+200
	 * (1500 truesize == 2944, sizeof(struct ipq) == 200)
	 *
	 * We will commit 4MB at one time. Should we cross that limit
	 * we will prune down to 3MB, making room for approx 8 big 64K
	 * datagrams (8x128k).
	 */
	net->ipv4.frags.high_thresh = 4 * 1024 * 1024;
	net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;
	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC 791 is wrong in proposing to prolong the timer on each
	 * fragment arrival by the TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

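/* Boot-time init: register the sysctls and pernet hooks, then wire up
 * the callbacks through which the generic inet_frag code hashes,
 * matches, constructs and destroys struct ipq instances.
 */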
void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");
}