// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <[email protected]>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

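/*
 * Taken together, esp_alloc_tmp() and the esp_tmp_*()/esp_req_sg() helpers
 * above carve a single kmalloc'd buffer into consecutive, suitably aligned
 * regions, roughly:
 *
 *   [ esp_output_extra / seqhi ][ IV ][ aead_request + tfm reqsize ][ nfrags * scatterlist ]
 *
 * with alignment padding in between, as computed in esp_alloc_tmp(). Each
 * helper only recomputes the offset of its region, so callers must pass the
 * same aead and seqhilen values that were used for the allocation.
 */
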
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct esp_output_extra *extra = esp_tmp_extra(tmp);
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(*extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

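/*
 * Look up the TCP socket carrying ESP-in-TCP for this state. In outline: a
 * previously found socket is cached in x->encap_sk and reused while it stays
 * TCP_ESTABLISHED; otherwise the established socket matching the encap ports
 * is looked up, checked for the espintcp ULP, and the cache is refreshed
 * (a stale cached entry is released via call_rcu). Returns an ERR_PTR on
 * failure; both callers below hold rcu_read_lock() across the call.
 */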
static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif

static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

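/*
 * With ESN enabled the upper 32 bits of the sequence number must be covered
 * by the ICV but are not transmitted. esp_output_set_esn() below therefore
 * shifts the ESP header four bytes towards the start of the packet, so the
 * buffer temporarily reads [ spi ][ seq-hi ][ seq-lo ] and all twelve bytes
 * end up in the AEAD's associated data; esp_output_restore_header() above
 * undoes the shift once encryption has completed, leaving only
 * [ spi ][ seq-lo ] on the wire.
 */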
static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}

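/*
 * Resulting encapsulation layouts, in outline:
 *
 *   UDP_ENCAP_ESPINUDP:          [ UDP header ][ ESP header ]...
 *   UDP_ENCAP_ESPINUDP_NON_IKE:  [ UDP header ][ 8 zero bytes ][ ESP header ]...
 *   TCP_ENCAP_ESPINTCP:          [ 2-byte length ][ ESP header ]...  (see below)
 *
 * The UDP checksum is left at zero here and filled in later by
 * esp_output_encap_csum().
 */
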
#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}

int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

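	/* Seed the IV from the 64-bit sequence number: the low min(ivlen, 8)
	 * bytes of the big-endian sequence number are copied into the tail
	 * of the zeroed IV, so the per-packet IV tracks the ESP sequence
	 * number.
	 */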
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

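/*
 * The padding computed in esp6_output() and stripped again by
 * esp_remove_trailer() follows the RFC 4303 trailer layout, roughly:
 *
 *   [ payload ][ TFC padding ][ padding ][ pad length ][ next header ][ ICV ]
 *
 * esp.clen is the ciphertext length (payload + TFC padding + padding + the
 * two trailer bytes) rounded up to the cipher block size, and esp.tailen is
 * everything that must be appended to the packet: the trailer plus room for
 * the ICV.
 */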
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	=	xfrm6_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp6_rcv_cb,
	.err_handler	=	esp6_err,
	.priority	=	0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);