1 | //SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * CFB: Cipher FeedBack mode | |
4 | * | |
5 | * Copyright (c) 2018 [email protected] | |
6 | * | |
7 | * CFB is a stream cipher mode which is layered on to a block | |
8 | * encryption scheme. It works very much like a one time pad where | |
9 | * the pad is generated initially from the encrypted IV and then | |
10 | * subsequently from the encrypted previous block of ciphertext. The | |
11 | * pad is XOR'd into the plain text to get the final ciphertext. | |
12 | * | |
13 | * The scheme of CFB is best described by wikipedia: | |
14 | * | |
15 | * https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#CFB | |
16 | * | |
17 | * Note that since the pad for both encryption and decryption is | |
18 | * generated by an encryption operation, CFB never uses the block | |
19 | * decryption function. | |
20 | */ | |
21 | ||
22 | #include <crypto/algapi.h> | |
23 | #include <crypto/internal/skcipher.h> | |
24 | #include <linux/err.h> | |
25 | #include <linux/init.h> | |
26 | #include <linux/kernel.h> | |
27 | #include <linux/module.h> | |
28 | #include <linux/slab.h> | |
29 | #include <linux/string.h> | |
30 | #include <linux/types.h> | |
31 | ||
/*
 * Per-transform context: the underlying single-block cipher used to
 * generate the CFB keystream (only its encrypt direction is ever used).
 */
struct crypto_cfb_ctx {
	struct crypto_cipher *child;
};
35 | ||
36 | static unsigned int crypto_cfb_bsize(struct crypto_skcipher *tfm) | |
37 | { | |
38 | struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm); | |
39 | struct crypto_cipher *child = ctx->child; | |
40 | ||
41 | return crypto_cipher_blocksize(child); | |
42 | } | |
43 | ||
44 | static void crypto_cfb_encrypt_one(struct crypto_skcipher *tfm, | |
45 | const u8 *src, u8 *dst) | |
46 | { | |
47 | struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm); | |
48 | ||
49 | crypto_cipher_encrypt_one(ctx->child, dst, src); | |
50 | } | |
51 | ||
52 | /* final encrypt and decrypt is the same */ | |
53 | static void crypto_cfb_final(struct skcipher_walk *walk, | |
54 | struct crypto_skcipher *tfm) | |
55 | { | |
a7d85e06 | 56 | const unsigned long alignmask = crypto_skcipher_alignmask(tfm); |
6650c4de | 57 | u8 tmp[MAX_CIPHER_BLOCKSIZE + MAX_CIPHER_ALIGNMASK]; |
a7d85e06 JB |
58 | u8 *stream = PTR_ALIGN(tmp + 0, alignmask + 1); |
59 | u8 *src = walk->src.virt.addr; | |
60 | u8 *dst = walk->dst.virt.addr; | |
61 | u8 *iv = walk->iv; | |
62 | unsigned int nbytes = walk->nbytes; | |
63 | ||
64 | crypto_cfb_encrypt_one(tfm, iv, stream); | |
65 | crypto_xor_cpy(dst, stream, src, nbytes); | |
66 | } | |
67 | ||
68 | static int crypto_cfb_encrypt_segment(struct skcipher_walk *walk, | |
69 | struct crypto_skcipher *tfm) | |
70 | { | |
71 | const unsigned int bsize = crypto_cfb_bsize(tfm); | |
72 | unsigned int nbytes = walk->nbytes; | |
73 | u8 *src = walk->src.virt.addr; | |
74 | u8 *dst = walk->dst.virt.addr; | |
75 | u8 *iv = walk->iv; | |
76 | ||
77 | do { | |
78 | crypto_cfb_encrypt_one(tfm, iv, dst); | |
79 | crypto_xor(dst, src, bsize); | |
80 | memcpy(iv, dst, bsize); | |
81 | ||
82 | src += bsize; | |
83 | dst += bsize; | |
84 | } while ((nbytes -= bsize) >= bsize); | |
85 | ||
86 | return nbytes; | |
87 | } | |
88 | ||
89 | static int crypto_cfb_encrypt_inplace(struct skcipher_walk *walk, | |
90 | struct crypto_skcipher *tfm) | |
91 | { | |
92 | const unsigned int bsize = crypto_cfb_bsize(tfm); | |
93 | unsigned int nbytes = walk->nbytes; | |
94 | u8 *src = walk->src.virt.addr; | |
95 | u8 *iv = walk->iv; | |
6650c4de | 96 | u8 tmp[MAX_CIPHER_BLOCKSIZE]; |
a7d85e06 JB |
97 | |
98 | do { | |
99 | crypto_cfb_encrypt_one(tfm, iv, tmp); | |
100 | crypto_xor(src, tmp, bsize); | |
101 | iv = src; | |
102 | ||
103 | src += bsize; | |
104 | } while ((nbytes -= bsize) >= bsize); | |
105 | ||
106 | memcpy(walk->iv, iv, bsize); | |
107 | ||
108 | return nbytes; | |
109 | } | |
110 | ||
111 | static int crypto_cfb_encrypt(struct skcipher_request *req) | |
112 | { | |
113 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | |
114 | struct skcipher_walk walk; | |
115 | unsigned int bsize = crypto_cfb_bsize(tfm); | |
116 | int err; | |
117 | ||
118 | err = skcipher_walk_virt(&walk, req, false); | |
119 | ||
120 | while (walk.nbytes >= bsize) { | |
121 | if (walk.src.virt.addr == walk.dst.virt.addr) | |
122 | err = crypto_cfb_encrypt_inplace(&walk, tfm); | |
123 | else | |
124 | err = crypto_cfb_encrypt_segment(&walk, tfm); | |
125 | err = skcipher_walk_done(&walk, err); | |
126 | } | |
127 | ||
128 | if (walk.nbytes) { | |
129 | crypto_cfb_final(&walk, tfm); | |
130 | err = skcipher_walk_done(&walk, 0); | |
131 | } | |
132 | ||
133 | return err; | |
134 | } | |
135 | ||
136 | static int crypto_cfb_decrypt_segment(struct skcipher_walk *walk, | |
137 | struct crypto_skcipher *tfm) | |
138 | { | |
139 | const unsigned int bsize = crypto_cfb_bsize(tfm); | |
140 | unsigned int nbytes = walk->nbytes; | |
141 | u8 *src = walk->src.virt.addr; | |
142 | u8 *dst = walk->dst.virt.addr; | |
143 | u8 *iv = walk->iv; | |
144 | ||
145 | do { | |
146 | crypto_cfb_encrypt_one(tfm, iv, dst); | |
fa460073 | 147 | crypto_xor(dst, src, bsize); |
a7d85e06 JB |
148 | iv = src; |
149 | ||
150 | src += bsize; | |
151 | dst += bsize; | |
152 | } while ((nbytes -= bsize) >= bsize); | |
153 | ||
154 | memcpy(walk->iv, iv, bsize); | |
155 | ||
156 | return nbytes; | |
157 | } | |
158 | ||
159 | static int crypto_cfb_decrypt_inplace(struct skcipher_walk *walk, | |
160 | struct crypto_skcipher *tfm) | |
161 | { | |
162 | const unsigned int bsize = crypto_cfb_bsize(tfm); | |
163 | unsigned int nbytes = walk->nbytes; | |
164 | u8 *src = walk->src.virt.addr; | |
165 | u8 *iv = walk->iv; | |
6650c4de | 166 | u8 tmp[MAX_CIPHER_BLOCKSIZE]; |
a7d85e06 JB |
167 | |
168 | do { | |
169 | crypto_cfb_encrypt_one(tfm, iv, tmp); | |
170 | memcpy(iv, src, bsize); | |
171 | crypto_xor(src, tmp, bsize); | |
172 | src += bsize; | |
173 | } while ((nbytes -= bsize) >= bsize); | |
174 | ||
175 | memcpy(walk->iv, iv, bsize); | |
176 | ||
177 | return nbytes; | |
178 | } | |
179 | ||
180 | static int crypto_cfb_decrypt_blocks(struct skcipher_walk *walk, | |
181 | struct crypto_skcipher *tfm) | |
182 | { | |
183 | if (walk->src.virt.addr == walk->dst.virt.addr) | |
184 | return crypto_cfb_decrypt_inplace(walk, tfm); | |
185 | else | |
186 | return crypto_cfb_decrypt_segment(walk, tfm); | |
187 | } | |
188 | ||
/*
 * Set the key on the underlying cipher.  Request flags are propagated
 * from the wrapping skcipher down to the child before the setkey, and
 * the child's result flags are reflected back up afterwards.
 */
static int crypto_cfb_setkey(struct crypto_skcipher *parent, const u8 *key,
			     unsigned int keylen)
{
	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(parent);
	struct crypto_cipher *child = ctx->child;
	int err;

	crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(child, crypto_skcipher_get_flags(parent) &
				       CRYPTO_TFM_REQ_MASK);
	err = crypto_cipher_setkey(child, key, keylen);
	crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(child) &
					  CRYPTO_TFM_RES_MASK);
	return err;
}
204 | ||
205 | static int crypto_cfb_decrypt(struct skcipher_request *req) | |
206 | { | |
207 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | |
208 | struct skcipher_walk walk; | |
209 | const unsigned int bsize = crypto_cfb_bsize(tfm); | |
210 | int err; | |
211 | ||
212 | err = skcipher_walk_virt(&walk, req, false); | |
213 | ||
214 | while (walk.nbytes >= bsize) { | |
215 | err = crypto_cfb_decrypt_blocks(&walk, tfm); | |
216 | err = skcipher_walk_done(&walk, err); | |
217 | } | |
218 | ||
219 | if (walk.nbytes) { | |
220 | crypto_cfb_final(&walk, tfm); | |
221 | err = skcipher_walk_done(&walk, 0); | |
222 | } | |
223 | ||
224 | return err; | |
225 | } | |
226 | ||
227 | static int crypto_cfb_init_tfm(struct crypto_skcipher *tfm) | |
228 | { | |
229 | struct skcipher_instance *inst = skcipher_alg_instance(tfm); | |
230 | struct crypto_spawn *spawn = skcipher_instance_ctx(inst); | |
231 | struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm); | |
232 | struct crypto_cipher *cipher; | |
233 | ||
234 | cipher = crypto_spawn_cipher(spawn); | |
235 | if (IS_ERR(cipher)) | |
236 | return PTR_ERR(cipher); | |
237 | ||
238 | ctx->child = cipher; | |
239 | return 0; | |
240 | } | |
241 | ||
/* Per-tfm destructor: release the child cipher allocated in init_tfm. */
static void crypto_cfb_exit_tfm(struct crypto_skcipher *tfm)
{
	struct crypto_cfb_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->child);
}
248 | ||
/*
 * Instance destructor.  The instance context holds a plain
 * struct crypto_spawn (set up with crypto_init_spawn() in
 * crypto_cfb_create()), so release it with crypto_drop_spawn().
 * The previous crypto_drop_skcipher() call only worked because the
 * spawn is the first member of struct crypto_skcipher_spawn.
 */
static void crypto_cfb_free(struct skcipher_instance *inst)
{
	crypto_drop_spawn(skcipher_instance_ctx(inst));
	kfree(inst);
}
254 | ||
255 | static int crypto_cfb_create(struct crypto_template *tmpl, struct rtattr **tb) | |
256 | { | |
257 | struct skcipher_instance *inst; | |
258 | struct crypto_attr_type *algt; | |
259 | struct crypto_spawn *spawn; | |
260 | struct crypto_alg *alg; | |
261 | u32 mask; | |
262 | int err; | |
263 | ||
264 | err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER); | |
265 | if (err) | |
266 | return err; | |
267 | ||
268 | inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL); | |
269 | if (!inst) | |
270 | return -ENOMEM; | |
271 | ||
272 | algt = crypto_get_attr_type(tb); | |
273 | err = PTR_ERR(algt); | |
274 | if (IS_ERR(algt)) | |
275 | goto err_free_inst; | |
276 | ||
277 | mask = CRYPTO_ALG_TYPE_MASK | | |
278 | crypto_requires_off(algt->type, algt->mask, | |
279 | CRYPTO_ALG_NEED_FALLBACK); | |
280 | ||
281 | alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, mask); | |
282 | err = PTR_ERR(alg); | |
283 | if (IS_ERR(alg)) | |
284 | goto err_free_inst; | |
285 | ||
286 | spawn = skcipher_instance_ctx(inst); | |
287 | err = crypto_init_spawn(spawn, alg, skcipher_crypto_instance(inst), | |
288 | CRYPTO_ALG_TYPE_MASK); | |
a7d85e06 | 289 | if (err) |
e5bde04c | 290 | goto err_put_alg; |
a7d85e06 JB |
291 | |
292 | err = crypto_inst_setname(skcipher_crypto_instance(inst), "cfb", alg); | |
293 | if (err) | |
294 | goto err_drop_spawn; | |
295 | ||
296 | inst->alg.base.cra_priority = alg->cra_priority; | |
297 | /* we're a stream cipher independend of the crypto cra_blocksize */ | |
298 | inst->alg.base.cra_blocksize = 1; | |
299 | inst->alg.base.cra_alignmask = alg->cra_alignmask; | |
300 | ||
301 | inst->alg.ivsize = alg->cra_blocksize; | |
302 | inst->alg.min_keysize = alg->cra_cipher.cia_min_keysize; | |
303 | inst->alg.max_keysize = alg->cra_cipher.cia_max_keysize; | |
304 | ||
305 | inst->alg.base.cra_ctxsize = sizeof(struct crypto_cfb_ctx); | |
306 | ||
307 | inst->alg.init = crypto_cfb_init_tfm; | |
308 | inst->alg.exit = crypto_cfb_exit_tfm; | |
309 | ||
310 | inst->alg.setkey = crypto_cfb_setkey; | |
311 | inst->alg.encrypt = crypto_cfb_encrypt; | |
312 | inst->alg.decrypt = crypto_cfb_decrypt; | |
313 | ||
314 | inst->free = crypto_cfb_free; | |
315 | ||
316 | err = skcipher_register_instance(tmpl, inst); | |
317 | if (err) | |
318 | goto err_drop_spawn; | |
e5bde04c | 319 | crypto_mod_put(alg); |
a7d85e06 JB |
320 | |
321 | out: | |
322 | return err; | |
323 | ||
324 | err_drop_spawn: | |
325 | crypto_drop_spawn(spawn); | |
e5bde04c PB |
326 | err_put_alg: |
327 | crypto_mod_put(alg); | |
a7d85e06 JB |
328 | err_free_inst: |
329 | kfree(inst); | |
330 | goto out; | |
331 | } | |
332 | ||
/* Template descriptor registered as "cfb" with the crypto core. */
static struct crypto_template crypto_cfb_tmpl = {
	.name = "cfb",
	.create = crypto_cfb_create,
	.module = THIS_MODULE,
};
338 | ||
/* Module entry point: register the "cfb" template. */
static int __init crypto_cfb_module_init(void)
{
	return crypto_register_template(&crypto_cfb_tmpl);
}
343 | ||
/* Module exit point: unregister the "cfb" template. */
static void __exit crypto_cfb_module_exit(void)
{
	crypto_unregister_template(&crypto_cfb_tmpl);
}
348 | ||
349 | module_init(crypto_cfb_module_init); | |
350 | module_exit(crypto_cfb_module_exit); | |
351 | ||
352 | MODULE_LICENSE("GPL"); | |
353 | MODULE_DESCRIPTION("CFB block cipher algorithm"); | |
354 | MODULE_ALIAS_CRYPTO("cfb"); |