/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

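/*
 * GRO receive handler for ESP. Parses the SPI and sequence number,
 * sets up a secpath and looks up the xfrm state if the NIC did not
 * already decrypt the packet, and then feeds the segment to
 * xfrm_input(). Returning ERR_PTR(-EINPROGRESS) signals the GRO layer
 * that the skb has been consumed by the IPsec stack.
 */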
static struct sk_buff **esp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

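	/*
	 * If the crypto was not already done in hardware (CRYPTO_DONE
	 * unset), attach a secpath and look up the xfrm state by
	 * destination address and SPI so xfrm_input() can process the
	 * segment.
	 */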
	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		err = secpath_set(skb);
		if (err)
			goto out;

		if (skb->sp->len == XFRM_MAX_DEPTH)
			goto out;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out;

		skb->sp->xvec[skb->sp->len++] = x;
		skb->sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo) {
			xfrm_state_put(x);
			goto out;
		}
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input, it does all
	 * the error handling and frees the resources on error. */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

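/*
 * GSO encapsulation callback. Pushes the skb back to the network
 * header, records ESP as the outer protocol, fills in the SPI and the
 * low 32 bits of the output sequence number, and saves the inner
 * protocol in the xfrm_offload block for use by the transmit path.
 */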
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

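/*
 * GSO segmentation callback. Validates that the skb carries the ESP
 * GSO type and that its SPI matches the state, strips the ESP header
 * and IV, masks out features the device cannot provide for this
 * packet, and lets the outer mode perform the actual segmentation.
 */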
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	x = skb->sp->xvec[skb->sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

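	/*
	 * If the device cannot offload ESP for this state, strip
	 * scatter-gather and checksum offload from the feature set so
	 * the software fallback gets linear, checksummed segments. If
	 * it can offload ESP but not the checksum, strip only the
	 * checksum features.
	 */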
	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
	    (x->xso.dev != skb->dev))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return x->outer_mode->gso_segment(x, skb, esp_features);
}

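/*
 * Input tail handler, called by the xfrm layer to finish reception of
 * an offloaded ESP packet. Checks that the ESP header and IV are
 * present, clears the checksum state if the crypto was not done by the
 * hardware, and completes processing via esp_input_done2().
 */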
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

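/*
 * Transmit handler for offloaded ESP. Decides between real hardware
 * offload and the software fallback, computes the padding and trailer
 * lengths, fills in the ESP header, advances the output sequence
 * number for GSO packets and fixes up the outer IP header. In the
 * fallback case the actual encryption is performed by
 * esp_output_tail().
 */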
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if (!(features & NETIF_F_HW_ESP) || !x->xso.offload_handle ||
	    (x->xso.dev != skb->dev)) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || (hw_offload && !skb_is_gso(skb))) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

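	/*
	 * For a GSO segmentation write the low 32 bits of the sequence
	 * number into the ESP header and advance the counter by the
	 * number of segments this skb will produce (or by one for a
	 * single segment).
	 */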
	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload)
		return 0;

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

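/*
 * GRO/GSO callbacks registered with the inet layer for IPPROTO_ESP and
 * the xfrm type offload operations that hook ESP into the generic
 * IPsec offload infrastructure.
 */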
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.description	= "ESP4 OFFLOAD",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.input_tail	= esp_input_tail,
	.xmit		= esp_xmit,
	.encap		= esp4_gso_encap,
};

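/*
 * Module init/exit: register and unregister the ESP xfrm type offload
 * and the inet GRO/GSO offload for IPPROTO_ESP.
 */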
static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	if (xfrm_unregister_type_offload(&esp_type_offload, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type offload\n", __func__);

	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <[email protected]>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);