/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI :
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <linux/icmpv6.h>
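
/*
 * Private per-skb state overlaid on the skb control block: "tmp" keeps
 * the scratch buffer from esp_alloc_tmp() alive until the asynchronous
 * crypto completion handlers below can free it.
 */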
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
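/*
 * Resulting layout of the scratch buffer (see the esp_tmp_* helpers
 * below for the exact alignment rules):
 *
 *   [seqhi (ESN only)][IV][aead request + ctx][scatterlist array]
 */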
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;
	len += crypto_aead_ivsize(aead);
	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_givcrypt_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));
	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}
static inline struct aead_givcrypt_request *esp_tmp_givreq(
	struct crypto_aead *aead, u8 *iv)
{
	struct aead_givcrypt_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_givcrypt_set_tfm(req, aead);
	return req;
}
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static inline struct scatterlist *esp_givreq_sg(
	struct crypto_aead *aead, struct aead_givcrypt_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_givcrypt_request *req;
	struct scatterlist *sg;
	struct scatterlist *asg;
	struct sk_buff *trailer;
	void *tmp;
	int blksize, clen, alen, plen, tfclen;
	int nfrags, assoclen, sglists, seqhilen;
	u8 *iv, *tail;
	__be32 *seqhi;
	struct esp_data *esp = x->data;

	/* skb is pure payload to encrypt */
	aead = esp->aead;
	alen = crypto_aead_authsize(aead);

	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	if (esp->padlen)
		clen = ALIGN(clen, esp->padlen);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_givreq(aead, iv);
	asg = esp_givreq_sg(aead, req);
	sg = asg + sglists;
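
	/*
	 * RFC 4303 trailer built below: optional TFC padding, then
	 * self-describing pad bytes (1, 2, 3, ...), the pad-length byte,
	 * the next-header byte, and finally room for the ICV.
	 */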
	/* Fill padding... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     esph->enc_data + crypto_aead_ivsize(aead) - skb->data,
		     clen + alen);
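
	/*
	 * With extended sequence numbers the associated data is not
	 * contiguous: SPI, then the high 32 sequence bits (living only in
	 * the scratch buffer), then the low 32 bits from the header, so
	 * three scatterlist entries are needed.
	 */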
	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));
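
	/*
	 * The givcrypt ("generated IV") interface writes the IV into
	 * esph->enc_data itself, seeded with the low 32 bits of the
	 * output sequence number.
	 */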
	aead_givcrypt_set_callback(req, 0, esp_output_done, skb);
	aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
	aead_givcrypt_set_assoc(req, asg, assoclen);
	aead_givcrypt_set_giv(req, esph->enc_data,
			      XFRM_SKB_CB(skb)->seq.output.low);

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_givencrypt(req);
	if (err == -EINPROGRESS)
		goto error;
	if (err == -EBUSY)
		err = NET_XMIT_DROP;

	kfree(tmp);
error:
	return err;
}
static int esp_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int hdr_len = skb_network_header_len(skb);
	int padlen;
	u8 nexthdr[2];

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;
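
	/*
	 * The last two bytes before the ICV are the pad-length and
	 * next-header fields of the ESP trailer.
	 */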
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		LIMIT_NETDEBUG(KERN_WARNING "ipsec esp packet is garbage "
			       "padlen=%d, elen=%d\n", padlen + 2, elen - alen);
		goto out;
	}

	/* ... check padding bits here. Silly. :-) */

	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct esp_data *esp = x->data;
	struct crypto_aead *aead = esp->aead;
	struct aead_request *req;
	struct sk_buff *trailer;
	int elen = skb->len - sizeof(*esph) - crypto_aead_ivsize(aead);
	int nfrags, assoclen, sglists, seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;
	struct scatterlist *asg;

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead))) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	if ((nfrags = skb_cow_data(skb, 0, &trailer)) < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = -ENOMEM;

	assoclen = sizeof(*esph);
	sglists = 1;
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		sglists += 2;
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	asg = esp_req_sg(aead, req);
	sg = asg + sglists;

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	/* Get ivec. This can be wrong, check against another impls. */
	iv = esph->enc_data;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, sizeof(*esph) + crypto_aead_ivsize(aead), elen);

	if ((x->props.flags & XFRM_STATE_ESN)) {
		sg_init_table(asg, 3);
		sg_set_buf(asg, &esph->spi, sizeof(__be32));
		*seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
		sg_set_buf(asg + 1, seqhi, seqhilen);
		sg_set_buf(asg + 2, &esph->seq_no, sizeof(__be32));
	} else
		sg_init_one(asg, esph, sizeof(*esph));

	aead_request_set_callback(req, 0, esp_input_done, skb);
	aead_request_set_crypt(req, sg, sg, elen, iv);
	aead_request_set_assoc(req, asg, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	ret = esp_input_done2(skb, ret);

out:
	return ret;
}
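
/*
 * Largest payload that still fits "mtu" bytes on the wire: subtract the
 * ESP header and IV (header_len), the ICV and, in transport mode, the
 * IPv6 header; round down to the cipher block alignment; the final "- 2"
 * reserves the pad-length and next-header trailer bytes.
 */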
static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
{
	struct esp_data *esp = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
	u32 align = max_t(u32, blksize, esp->padlen);
	unsigned int net_adj;

	if (x->props.mode != XFRM_MODE_TUNNEL)
		net_adj = sizeof(struct ipv6hdr);
	else
		net_adj = 0;

	return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
		 net_adj) & ~(align - 1)) + (net_adj - 2);
}
static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		     u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_DEST_UNREACH &&
	    type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, 0, 0);
	else
		ip6_update_pmtu(skb, net, info, 0, 0);
	xfrm_state_put(x);
}
static void esp6_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	crypto_free_aead(esp->aead);
	kfree(esp);
}
static int esp_init_aead(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	int err;

	aead = crypto_alloc_aead(x->aead->alg_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;
	esp->aead = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);

error:
	return err;
}
static int esp_init_authenc(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (x->ealg == NULL)
		goto error;

	err = -ENAMETOOLONG;
	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authencesn(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "authenc(%s,%s)",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name) >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;
	esp->aead = aead;
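
	/*
	 * The authenc key blob handed to the AEAD is an rtattr-wrapped
	 * parameter block followed by the raw authentication key and then
	 * the raw encryption key:
	 *
	 *   [rtattr: enckeylen][auth key][enc key]
	 */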
	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_aead_authsize(aead),
				 aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}
static int esp6_init_state(struct xfrm_state *x)
{
	struct esp_data *esp;
	struct crypto_aead *aead;
	u32 align;
	int err;

	if (x->encap)
		return -EINVAL;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	x->data = esp;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);
	if (err)
		goto error;

	aead = esp->aead;
	esp->padlen = 0;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) -
						sizeof(struct iphdr));
		break;
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	default:
		goto error;
	}
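
	/*
	 * Worst case the trailer grows by a full alignment block minus one
	 * pad byte, plus the two pad-length/next-header bytes (align + 1
	 * in total), plus the ICV.
	 */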
	align = ALIGN(crypto_aead_blocksize(aead), 4);
	if (esp->padlen)
		align = max_t(u32, align, esp->padlen);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(esp->aead);

error:
	return err;
}
static const struct xfrm_type esp6_type = {
	.description	= "ESP6",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.get_mtu	= esp6_get_mtu,
	.input		= esp6_input,
	.output		= esp6_output,
	.hdr_offset	= xfrm6_find_1stfragopt,
};
static const struct inet6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.err_handler	= esp6_err,
	.flags		= INET6_PROTO_NOPOLICY,
};
static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (inet6_add_protocol(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}
static void __exit esp6_fini(void)
{
	if (inet6_del_protocol(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp6_type, AF_INET6) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);