/*
 * echainiv: Encrypted Chain IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text.  This algorithm requires that the block size be equal
 * to the IV size.  It is mainly useful for CBC.
 *
 * This generator can only be used by algorithms where authentication
 * is performed after encryption (i.e., authenc).
 *
 * Copyright (c) 2015 Herbert Xu <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
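
/*
 * Typical use (illustrative, not from this file): the template wraps an
 * inner AEAD, so a caller might request e.g.
 *
 *	crypto_alloc_aead("echainiv(authenc(hmac(sha256),cbc(aes)))", 0, 0);
 *
 * and the resulting tfm then generates and transmits the IV itself.
 */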

#include <crypto/internal/geniv.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define MAX_IV_SIZE 16

static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);

/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
	u32 *a = (u32 *)dst;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		*a++ = this_cpu_read(*b);
		b++;
	}
}

static void echainiv_write_iv(const u8 *src, unsigned size)
{
	const u32 *a = (const u32 *)src;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		this_cpu_write(*b, *a);
		a++;
		b++;
	}
}

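/*
 * Common completion work: on success, chain the IV that was just used by
 * writing it back to the per-CPU state and to the caller's request, then
 * free any unaligned IV copy allocated by echainiv_encrypt().
 */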
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;
	unsigned int ivsize;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	ivsize = crypto_aead_ivsize(geniv);

	echainiv_write_iv(subreq->iv, ivsize);

	if (req->iv != subreq->iv)
		memcpy(req->iv, subreq->iv, ivsize);

out:
	if (req->iv != subreq->iv)
		kzfree(subreq->iv);
}

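/*
 * Asynchronous completion callback: do the common IV bookkeeping and
 * notify the original requester.
 */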
static void echainiv_encrypt_complete(struct crypto_async_request *base,
				      int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_complete2(req, err);
	aead_request_complete(req, err);
}

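/*
 * Encryption: if src and dst differ, copy the associated data and
 * plaintext across with the null cipher first.  The caller's IV is
 * XORed with the salt and written into the destination at the IV
 * position; the IV actually fed to the child AEAD is the current
 * per-CPU chain value.
 */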
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(
			&desc, req->dst, req->src,
			req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ?
				       GFP_KERNEL : GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen, info);
	aead_request_set_ad(subreq, req->assoclen);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}

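/*
 * Decryption: the transmitted IV sits between the associated data and
 * the ciphertext, so pull it out of the source buffer, extend the AD to
 * cover it and shrink the ciphertext by ivsize before calling the child
 * AEAD.
 */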
static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}

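/*
 * Instance construction: build a geniv wrapper around the inner AEAD and
 * reject IV sizes that are not a multiple of 32 bits or larger than
 * MAX_IV_SIZE.  The context is extended by ivsize bytes to hold the salt.
 */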
static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);

	if (IS_ERR(inst))
		return PTR_ERR(inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.init = aead_init_geniv;
	inst->alg.exit = aead_exit_geniv;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

	inst->free = aead_geniv_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

out:
	return err;

free_inst:
	aead_geniv_free(inst);
	goto out;
}

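/* Release a template instance allocated by echainiv_aead_create(). */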
static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}

static struct crypto_template echainiv_tmpl = {
	.name = "echainiv",
	.create = echainiv_aead_create,
	.free = echainiv_free,
	.module = THIS_MODULE,
};

static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}

static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}

module_init(echainiv_module_init);
module_exit(echainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");