/*
 * Shared glue code for 128bit block ciphers
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */
#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
#include <crypto/scatterwalk.h>
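
/*
 * Usage note: the helpers below expect the funcs[] table of struct
 * common_glue_ctx (see asm/crypto/glue_helper.h) to be ordered from the
 * widest multi-block batch down to a single-block entry, i.e.
 * funcs[num_funcs - 1].num_blocks == 1.  The batching loops fall through
 * to narrower entries as fewer blocks remain, and the CTR tail handling
 * takes its single-block routine from the last slot.
 */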
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
                                   struct blkcipher_desc *desc,
                                   struct blkcipher_walk *walk)
{
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes, i, func_bytes;
        bool fpu_enabled = false;
        int err;

        err = blkcipher_walk_virt(desc, walk);

        while ((nbytes = walk->nbytes)) {
                u8 *wsrc = walk->src.virt.addr;
                u8 *wdst = walk->dst.virt.addr;

                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             desc, fpu_enabled, nbytes);

                for (i = 0; i < gctx->num_funcs; i++) {
                        func_bytes = bsize * gctx->funcs[i].num_blocks;

                        /* Process multi-block batch */
                        if (nbytes >= func_bytes) {
                                do {
                                        gctx->funcs[i].fn_u.ecb(ctx, wdst,
                                                                wsrc);
                                        wsrc += func_bytes;
                                        wdst += func_bytes;
                                        nbytes -= func_bytes;
                                } while (nbytes >= func_bytes);

                                if (nbytes < bsize)
                                        goto done;
                        }
                }
done:
                err = blkcipher_walk_done(desc, walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
                          struct blkcipher_desc *desc, struct scatterlist *dst,
                          struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
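
/*
 * Example (illustrative sketch, not part of this file): a cipher module
 * typically wires this helper into its blkcipher encrypt/decrypt ops.
 * "example_enc_gctx" stands for a hypothetical common_glue_ctx listing
 * the cipher's ECB routines from widest batch to single block:
 *
 *      static int ecb_encrypt(struct blkcipher_desc *desc,
 *                             struct scatterlist *dst,
 *                             struct scatterlist *src, unsigned int nbytes)
 *      {
 *              return glue_ecb_crypt_128bit(&example_enc_gctx, desc,
 *                                           dst, src, nbytes);
 *      }
 */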
static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
                                              struct blkcipher_desc *desc,
                                              struct blkcipher_walk *walk)
{
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 *iv = (u128 *)walk->iv;

        do {
                u128_xor(dst, src, iv);
                fn(ctx, (u8 *)dst, (u8 *)dst);
                /* this block's ciphertext chains into the next block */
                iv = dst;

                src += 1;
                dst += 1;
                nbytes -= bsize;
        } while (nbytes >= bsize);

        *(u128 *)walk->iv = *iv;
        return nbytes;
}
int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
                            struct blkcipher_desc *desc,
                            struct scatterlist *dst,
                            struct scatterlist *src, unsigned int nbytes)
{
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
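
/*
 * Example (illustrative sketch): CBC encryption is inherently serial, as
 * each block is XORed with the previous block's ciphertext before being
 * encrypted, so callers pass a plain single-block encrypt function rather
 * than a common_glue_ctx.  "example_encrypt" is a hypothetical primitive;
 * GLUE_FUNC_CAST comes from asm/crypto/glue_helper.h:
 *
 *      static int cbc_encrypt(struct blkcipher_desc *desc,
 *                             struct scatterlist *dst,
 *                             struct scatterlist *src, unsigned int nbytes)
 *      {
 *              return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(example_encrypt),
 *                                             desc, dst, src, nbytes);
 *      }
 */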
static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
                          struct blkcipher_desc *desc,
                          struct blkcipher_walk *walk)
{
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        u128 last_iv;
        unsigned int num_blocks, func_bytes;
        unsigned int i;

        /* Start of the last block. */
        src += nbytes / bsize - 1;
        dst += nbytes / bsize - 1;

        last_iv = *src;

        for (i = 0; i < gctx->num_funcs; i++) {
                num_blocks = gctx->funcs[i].num_blocks;
                func_bytes = bsize * num_blocks;

                /* Process multi-block batch */
                if (nbytes >= func_bytes) {
                        do {
                                nbytes -= func_bytes - bsize;
                                src -= num_blocks - 1;
                                dst -= num_blocks - 1;

                                gctx->funcs[i].fn_u.cbc(ctx, dst, src);

                                nbytes -= bsize;
                                if (nbytes < bsize)
                                        goto done;

                                u128_xor(dst, dst, src - 1);
                                src -= 1;
                                dst -= 1;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                goto done;
                }
        }

done:
        u128_xor(dst, dst, (u128 *)walk->iv);
        *(u128 *)walk->iv = last_iv;

        return nbytes;
}
int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
                            struct blkcipher_desc *desc,
                            struct scatterlist *dst,
                            struct scatterlist *src, unsigned int nbytes)
{
        const unsigned int bsize = 128 / 8;
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt(desc, &walk);

        while ((nbytes = walk.nbytes)) {
                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             desc, fpu_enabled, nbytes);
                nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);
        return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
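
/*
 * Example (illustrative sketch): unlike encryption, CBC decryption of each
 * block depends only on two adjacent ciphertext blocks, so it can be
 * batched, which is why __glue_cbc_decrypt_128bit() walks backwards from
 * the last block.  A hypothetical gctx for a cipher with an 8-way CBC
 * routine; GLUE_CBC_FUNC_CAST comes from asm/crypto/glue_helper.h:
 *
 *      static const struct common_glue_ctx example_cbc_dec_gctx = {
 *              .num_funcs = 2,
 *              .fpu_blocks_limit = 8,
 *
 *              .funcs = { {
 *                      .num_blocks = 8,
 *                      .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(example_dec_8way) }
 *              }, {
 *                      .num_blocks = 1,
 *                      .fn_u = { .cbc = GLUE_CBC_FUNC_CAST(example_decrypt) }
 *              } }
 *      };
 */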
static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
                                        struct blkcipher_desc *desc,
                                        struct blkcipher_walk *walk)
{
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        u8 *src = (u8 *)walk->src.virt.addr;
        u8 *dst = (u8 *)walk->dst.virt.addr;
        unsigned int nbytes = walk->nbytes;
        le128 ctrblk;
        u128 tmp;

        be128_to_le128(&ctrblk, (be128 *)walk->iv);

        /* process the final partial block via a full-size bounce buffer */
        memcpy(&tmp, src, nbytes);
        fn_ctr(ctx, &tmp, &tmp, &ctrblk);
        memcpy(dst, &tmp, nbytes);

        le128_to_be128((be128 *)walk->iv, &ctrblk);
}
static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
                                            struct blkcipher_desc *desc,
                                            struct blkcipher_walk *walk)
{
        const unsigned int bsize = 128 / 8;
        void *ctx = crypto_blkcipher_ctx(desc->tfm);
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        le128 ctrblk;
        unsigned int num_blocks, func_bytes;
        unsigned int i;

        be128_to_le128(&ctrblk, (be128 *)walk->iv);

        /* Process multi-block batch */
        for (i = 0; i < gctx->num_funcs; i++) {
                num_blocks = gctx->funcs[i].num_blocks;
                func_bytes = bsize * num_blocks;

                if (nbytes >= func_bytes) {
                        do {
                                gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                goto done;
                }
        }

done:
        le128_to_be128((be128 *)walk->iv, &ctrblk);
        return nbytes;
}
int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
                          struct blkcipher_desc *desc, struct scatterlist *dst,
                          struct scatterlist *src, unsigned int nbytes)
{
        const unsigned int bsize = 128 / 8;
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);
        err = blkcipher_walk_virt_block(desc, &walk, bsize);

        while ((nbytes = walk.nbytes) >= bsize) {
                fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                             desc, fpu_enabled, nbytes);
                nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }

        glue_fpu_end(fpu_enabled);

        if (walk.nbytes) {
                glue_ctr_crypt_final_128bit(
                        gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
                err = blkcipher_walk_done(desc, &walk, 0);
        }

        return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
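
/*
 * Example (illustrative sketch): a single-block fn_u.ctr routine, used both
 * as the last funcs[] entry and for the partial tail block above, encrypts
 * the current counter, XORs it into the data and advances the counter.
 * "example_encrypt" is a hypothetical primitive; le128_inc comes from
 * asm/crypto/glue_helper.h:
 *
 *      static void example_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
 *                                    le128 *iv)
 *      {
 *              be128 ctrblk;
 *
 *              le128_to_be128(&ctrblk, iv);
 *              le128_inc(iv);
 *
 *              example_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
 *              u128_xor(dst, src, (u128 *)&ctrblk);
 *      }
 */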
static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
                                            void *ctx,
                                            struct blkcipher_desc *desc,
                                            struct blkcipher_walk *walk)
{
        const unsigned int bsize = 128 / 8;
        unsigned int nbytes = walk->nbytes;
        u128 *src = (u128 *)walk->src.virt.addr;
        u128 *dst = (u128 *)walk->dst.virt.addr;
        unsigned int num_blocks, func_bytes;
        unsigned int i;

        /* Process multi-block batch */
        for (i = 0; i < gctx->num_funcs; i++) {
                num_blocks = gctx->funcs[i].num_blocks;
                func_bytes = bsize * num_blocks;

                if (nbytes >= func_bytes) {
                        do {
                                gctx->funcs[i].fn_u.xts(ctx, dst, src,
                                                        (le128 *)walk->iv);
                                src += num_blocks;
                                dst += num_blocks;
                                nbytes -= func_bytes;
                        } while (nbytes >= func_bytes);

                        if (nbytes < bsize)
                                goto done;
                }
        }

done:
        return nbytes;
}
/* for implementations with a faster XTS IV generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
                          struct blkcipher_desc *desc, struct scatterlist *dst,
                          struct scatterlist *src, unsigned int nbytes,
                          void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
                          void *tweak_ctx, void *crypt_ctx)
{
        const unsigned int bsize = 128 / 8;
        bool fpu_enabled = false;
        struct blkcipher_walk walk;
        int err;

        blkcipher_walk_init(&walk, dst, src, nbytes);

        err = blkcipher_walk_virt(desc, &walk);
        nbytes = walk.nbytes;
        if (!nbytes)
                return err;

        /* set minimum length to bsize, for tweak_fn */
        fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
                                     desc, fpu_enabled,
                                     nbytes < bsize ? bsize : nbytes);

        /* calculate first value of T */
        tweak_fn(tweak_ctx, walk.iv, walk.iv);

        while (nbytes) {
                nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);

                err = blkcipher_walk_done(desc, &walk, nbytes);
                nbytes = walk.nbytes;
        }

        glue_fpu_end(fpu_enabled);

        return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
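
/*
 * Example (illustrative sketch): an XTS caller passes its tweak-key cipher
 * as tweak_fn so the first tweak is computed inside the FPU section opened
 * above.  Names are hypothetical; XTS_TWEAK_CAST comes from crypto/xts.h:
 *
 *      static int xts_encrypt(struct blkcipher_desc *desc,
 *                             struct scatterlist *dst,
 *                             struct scatterlist *src, unsigned int nbytes)
 *      {
 *              struct example_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 *
 *              return glue_xts_crypt_128bit(&example_xts_enc_gctx, desc,
 *                                           dst, src, nbytes,
 *                                           XTS_TWEAK_CAST(example_encrypt),
 *                                           &ctx->tweak_ctx, &ctx->crypt_ctx);
 *      }
 */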
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
                               common_glue_func_t fn)
{
        le128 ivblk = *iv;

        /* generate next IV */
        le128_gf128mul_x_ble(iv, &ivblk);

        /* CC <- T xor C */
        u128_xor(dst, src, (u128 *)&ivblk);

        /* PP <- D(Key2,CC) */
        fn(ctx, (u8 *)dst, (u8 *)dst);

        /* P <- T xor PP */
        u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
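
/*
 * Note: the helper above performs a single XTS block with the current
 * tweak T taken from *iv, i.e. dst = fn(src xor T) xor T, and leaves the
 * next tweak, T multiplied by x in GF(2^128) (le128_gf128mul_x_ble), in
 * *iv for the following block.  Multi-block fn_u.xts routines typically
 * repeat this same sequence, in C or assembler, over num_blocks blocks.
 */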
MODULE_LICENSE("GPL");