/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

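/* __skb_udp_tunnel_segment() does the heavy lifting for UDP tunnel GSO:
 * strip the outer (tunnel) headers, segment the inner packet with the
 * caller-supplied gso_inner_segment() callback, then rebuild the outer
 * MAC/IP/UDP headers on every resulting segment and fix up the outer UDP
 * length and checksum.
 */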
static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);
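	/* "partial" now holds the outer UDP checksum seed with the old
	 * length contribution removed; each segment produced below re-adds
	 * its own length with csum_add(partial, htonl(len)) and folds the
	 * result, after which gso_make_checksum() or the NIC completes the
	 * checksum over the segment's payload.
	 */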

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

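	/* Only feature bits the device advertises for encapsulated frames
	 * (hw_enc_features) may be used when segmenting the inner packet
	 * below.
	 */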
	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
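	/* Walk the segment list: push the tunnel headers back onto every
	 * segment, reset the outer header offsets, and fix up the outer
	 * UDP length and checksum.
	 */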
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial && skb_is_gso(skb)) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
				       (__force __wsum)htonl(len)));

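		/* Either finish the outer UDP checksum in software here, or
		 * leave the segment as CHECKSUM_PARTIAL with csum_start and
		 * csum_offset pointing at the outer UDP header so the NIC
		 * completes it on transmit.
		 */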
		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}

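/* skb_udp_tunnel_segment() is the common entry point for segmenting
 * UDP-encapsulated GSO packets (e.g. vxlan, geneve, fou): it resolves the
 * inner segmentation callback from skb->inner_protocol_type and delegates
 * the real work to __skb_udp_tunnel_segment() above.
 */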
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);

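/* GSO callback for IPPROTO_UDP on IPv4: UDP tunnel GSO types are handed to
 * skb_udp_tunnel_segment(); plain SKB_GSO_UDP packets get software UFO,
 * i.e. the full UDP checksum is computed here and the oversized datagram is
 * then split into IP fragments by skb_segment().
 */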
static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */

	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
	 * software prior to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

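/* Common GRO receive path for UDP tunnels.  Aggregation is attempted only
 * when an encap socket with a gro_receive callback is found for the outer
 * ports (tunnel drivers such as vxlan or fou typically install one, e.g.
 * via setup_udp_tunnel_sock()), and only when the outer checksum has
 * already been verified or the skb is locally generated (CHECKSUM_PARTIAL);
 * otherwise the packet is flushed to the normal receive path.
 */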
struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh, udp_lookup_t lookup)
{
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;
	struct sock *sk;

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);

	if (sk && udp_sk(sk)->gro_receive)
		goto unflush;
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports and require that the checksums are either
		 * both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);

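/* IPv4 GRO receive entry for UDP: locate the UDP header, validate the
 * checksum against the IPv4 pseudo-header (a zero UDP checksum is legal
 * over IPv4), then hand off to the generic udp_gro_receive() above.
 */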
static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);
skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

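/* Common GRO completion for UDP tunnels: patch the outer UDP length of the
 * merged skb, mark it as encapsulated so the inner gro_complete() callbacks
 * set up the inner offsets, and let the encap socket's gro_complete()
 * finish the job.
 */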
int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;
	struct sock *sk;

	uh->len = newlen;

	/* Set encapsulation before calling into inner gro_complete()
	 * functions to make them set up the inner offsets.
	 */
	skb->encapsulation = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete)
		err = udp_sk(sk)->gro_complete(sk, skb,
					       nhoff + sizeof(struct udphdr));
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);

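/* IPv4 GRO completion for UDP: restore the outer pseudo-header checksum and
 * set the gso_type so that a later resegmentation of the merged skb
 * regenerates (or omits) the outer UDP checksum exactly as it was received.
 */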
static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (uh->check) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}

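/* Offload callbacks for IPPROTO_UDP.  udpv4_offload_init() registers them
 * in the IPv4 protocol offload table via inet_add_offload(); it is expected
 * to be called once during early networking init (typically from
 * ipv4_offload_init() in af_inet.c).
 */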
static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment  = udp4_ufo_fragment,
		.gro_receive  = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}