/*
 * sun4i-ss-cipher.c - hardware cryptographic accelerator for Allwinner A20 SoC
 *
 * This file adds support for the AES cipher with 128, 192 and 256 bit
 * keysizes in CBC and ECB mode, and for DES and 3DES in CBC and ECB mode.
 *
 * You can find the datasheet in Documentation/arm/sunxi/README
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include "sun4i-ss.h"
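
/*
 * sun4i_ss_opti_poll() is the fast path: it assumes that every scatterlist
 * entry of both src and dst has a length that is a multiple of 4 bytes, so
 * the RX/TX FIFOs can be fed and drained one 32-bit word at a time with
 * writesl()/readsl() directly on the sg_miter-mapped pages. The number of
 * words that may be pushed or pulled in each iteration is polled from the
 * SS_FCSR register via SS_RXFIFO_SPACES()/SS_TXFIFO_SPACES().
 */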
static int sun4i_ss_opti_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offsets for in and out */
	unsigned long flags;

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	for (i = 0; i < 4 && i < ivsize / 4; i++) {
		v = *(u32 *)(areq->info + i * 4);
		writel(v, ss->base + SS_IV0 + i * 4);
	}

	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	/* both counters are in 32-bit words here */
	ileft = areq->nbytes / 4;
	oleft = areq->nbytes / 4;
	oi = 0;
	oo = 0;

	do {
		/* feed the RX FIFO with as many words as it can accept */
		todo = min3(rx_cnt, ileft, (mi.length - oi) / 4);
		if (todo > 0) {
			ileft -= todo;
			writesl(ss->base + SS_RXFIFO, mi.addr + oi, todo);
			oi += todo * 4;
		}
		if (oi == mi.length) {
			sg_miter_next(&mi);
			oi = 0;
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);

		/* drain the TX FIFO into the destination SG */
		todo = min3(tx_cnt, oleft, (mo.length - oo) / 4);
		if (todo > 0) {
			oleft -= todo;
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oo += todo * 4;
		}
		if (oo == mo.length) {
			sg_miter_next(&mo);
			oo = 0;
		}
	} while (oleft > 0);

	/* write back the updated IV for chaining the next request */
	for (i = 0; i < 4 && i < ivsize / 4; i++) {
		v = readl(ss->base + SS_IV0 + i * 4);
		*(u32 *)(areq->info + i * 4) = v;
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}

/* Generic function that supports SGs with a size that is not a multiple of 4 */
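/*
 * Unaligned chunks are handled with two small bounce buffers: input bytes
 * are accumulated in buf until a whole number of 32-bit words is available
 * for the RX FIFO, and TX FIFO words that do not fit the current output SG
 * chunk are parked in bufo and then copied out piecewise with memcpy().
 */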
static int sun4i_ss_cipher_poll(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	int no_chunk = 1;
	struct scatterlist *in_sg = areq->src;
	struct scatterlist *out_sg = areq->dst;
	unsigned int ivsize = crypto_ablkcipher_ivsize(tfm);
	struct sun4i_cipher_req_ctx *ctx = ablkcipher_request_ctx(areq);
	u32 mode = ctx->mode;
	/* when activating SS, the default FIFO space is SS_RX_DEFAULT(32) */
	u32 rx_cnt = SS_RX_DEFAULT;
	u32 tx_cnt = 0;
	u32 spaces;
	u32 v;
	int i, err = 0;
	unsigned int ileft = areq->nbytes;
	unsigned int oleft = areq->nbytes;
	unsigned int todo;
	struct sg_mapping_iter mi, mo;
	unsigned int oi, oo; /* offsets for in and out */
	char buf[4 * SS_RX_MAX];  /* bounce buffer to linearize the source SG */
	char bufo[4 * SS_TX_MAX]; /* bounce buffer to linearize the destination SG */
	unsigned int ob = 0;  /* offset in buf */
	unsigned int obo = 0; /* offset in bufo */
	unsigned int obl = 0; /* length of data in bufo */
	unsigned long flags;

	if (areq->nbytes == 0)
		return 0;

	if (!areq->info) {
		dev_err_ratelimited(ss->dev, "ERROR: Empty IV\n");
		return -EINVAL;
	}

	if (!areq->src || !areq->dst) {
		dev_err_ratelimited(ss->dev, "ERROR: Some SGs are NULL\n");
		return -EINVAL;
	}

	/*
	 * If we have only SGs with a size that is a multiple of 4,
	 * we can use the SS optimized function.
	 */
	while (in_sg && no_chunk == 1) {
		if ((in_sg->length % 4) != 0)
			no_chunk = 0;
		in_sg = sg_next(in_sg);
	}
	while (out_sg && no_chunk == 1) {
		if ((out_sg->length % 4) != 0)
			no_chunk = 0;
		out_sg = sg_next(out_sg);
	}

	if (no_chunk == 1)
		return sun4i_ss_opti_poll(areq);

	spin_lock_irqsave(&ss->slock, flags);

	for (i = 0; i < op->keylen; i += 4)
		writel(*(op->key + i / 4), ss->base + SS_KEY0 + i);

	for (i = 0; i < 4 && i < ivsize / 4; i++) {
		v = *(u32 *)(areq->info + i * 4);
		writel(v, ss->base + SS_IV0 + i * 4);
	}

	writel(mode, ss->base + SS_CTL);

	sg_miter_start(&mi, areq->src, sg_nents(areq->src),
		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
	sg_miter_start(&mo, areq->dst, sg_nents(areq->dst),
		       SG_MITER_TO_SG | SG_MITER_ATOMIC);
	sg_miter_next(&mi);
	sg_miter_next(&mo);
	if (!mi.addr || !mo.addr) {
		dev_err_ratelimited(ss->dev, "ERROR: sg_miter return null\n");
		err = -EINVAL;
		goto release_ss;
	}

	/* both counters are in bytes here, unlike in the optimized path */
	ileft = areq->nbytes;
	oleft = areq->nbytes;
	oi = 0;
	oo = 0;

	while (oleft > 0) {
		if (ileft > 0) {
			/*
			 * todo is the number of consecutive 4-byte words
			 * that we can read from the current SG.
			 */
			todo = min3(rx_cnt, ileft / 4, (mi.length - oi) / 4);
			if (todo > 0 && ob == 0) {
				writesl(ss->base + SS_RXFIFO, mi.addr + oi,
					todo);
				ileft -= todo * 4;
				oi += todo * 4;
			} else {
				/*
				 * Not enough consecutive bytes, so we need to
				 * linearize them in buf. todo is in bytes.
				 * After that copy, once we have a multiple of
				 * 4 bytes, we must be able to write all of
				 * buf in one pass, which is why we min() with
				 * rx_cnt.
				 */
				todo = min3(rx_cnt * 4 - ob, ileft,
					    mi.length - oi);
				memcpy(buf + ob, mi.addr + oi, todo);
				ileft -= todo;
				oi += todo;
				ob += todo;
				if (ob % 4 == 0) {
					writesl(ss->base + SS_RXFIFO, buf,
						ob / 4);
					ob = 0;
				}
			}
			if (oi == mi.length) {
				sg_miter_next(&mi);
				oi = 0;
			}
		}

		spaces = readl(ss->base + SS_FCSR);
		rx_cnt = SS_RXFIFO_SPACES(spaces);
		tx_cnt = SS_TXFIFO_SPACES(spaces);
		dev_dbg(ss->dev, "%x %u/%u %u/%u cnt=%u %u/%u %u/%u cnt=%u %u\n",
			mode,
			oi, mi.length, ileft, areq->nbytes, rx_cnt,
			oo, mo.length, oleft, areq->nbytes, tx_cnt, ob);

		if (tx_cnt == 0)
			continue;

		/* todo is in 4-byte words */
		todo = min3(tx_cnt, oleft / 4, (mo.length - oo) / 4);
		if (todo > 0) {
			readsl(ss->base + SS_TXFIFO, mo.addr + oo, todo);
			oleft -= todo * 4;
			oo += todo * 4;
			if (oo == mo.length) {
				sg_miter_next(&mo);
				oo = 0;
			}
		} else {
			/*
			 * Read obl bytes into bufo; we read the maximum
			 * available in order to empty the device FIFO.
			 */
			readsl(ss->base + SS_TXFIFO, bufo, tx_cnt);
			obl = tx_cnt * 4;
			obo = 0;
			do {
				/*
				 * How many bytes can we copy?
				 * No more than the remaining SG size and
				 * no more than the remaining buffer;
				 * no need to test against oleft.
				 */
				todo = min(mo.length - oo, obl - obo);
				memcpy(mo.addr + oo, bufo + obo, todo);
				oleft -= todo;
				obo += todo;
				oo += todo;
				if (oo == mo.length) {
					sg_miter_next(&mo);
					oo = 0;
				}
			} while (obo < obl);
			/* bufo must be fully used here */
		}
	}

	/* write back the updated IV for chaining the next request */
	for (i = 0; i < 4 && i < ivsize / 4; i++) {
		v = readl(ss->base + SS_IV0 + i * 4);
		*(u32 *)(areq->info + i * 4) = v;
	}

release_ss:
	sg_miter_stop(&mi);
	sg_miter_stop(&mo);
	writel(0, ss->base + SS_CTL);
	spin_unlock_irqrestore(&ss->slock, flags);

	return err;
}
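
/*
 * The per-request mode word written to SS_CTL is built by the thin wrappers
 * below: the algorithm (SS_OP_AES/SS_OP_DES/SS_OP_3DES), the chaining mode
 * (SS_ECB/SS_CBC), the direction (SS_ENCRYPTION/SS_DECRYPTION), SS_ENABLED
 * and the key size selected at setkey time (op->keymode) are OR'ed together
 * and stored in the request context before calling sun4i_ss_cipher_poll().
 */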

/* CBC AES */
int sun4i_ss_cbc_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB AES */
int sun4i_ss_ecb_aes_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_aes_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_AES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC DES */
int sun4i_ss_cbc_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB DES */
int sun4i_ss_ecb_des_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* CBC 3DES */
int sun4i_ss_cbc_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_cbc_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_CBC | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

/* ECB 3DES */
int sun4i_ss_ecb_des3_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_ENCRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}

int sun4i_ss_ecb_des3_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(areq);
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_cipher_req_ctx *rctx = ablkcipher_request_ctx(areq);

	rctx->mode = SS_OP_3DES | SS_ECB | SS_ENABLED | SS_DECRYPTION |
		op->keymode;
	return sun4i_ss_cipher_poll(areq);
}
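
/*
 * None of the entry points above is referenced in this file: they are hooked
 * into the crypto API by the core driver. As a rough sketch only (the field
 * values below are assumptions; the real table lives in sun4i-ss-core.c),
 * each algorithm is described by a sun4i_ss_alg_template whose alg.crypto
 * member is a struct crypto_alg along these lines:
 *
 *	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
 *	  .alg.crypto = {
 *		.cra_name		= "cbc(aes)",
 *		.cra_driver_name	= "cbc-aes-sun4i-ss",
 *		.cra_blocksize		= AES_BLOCK_SIZE,
 *		.cra_ctxsize		= sizeof(struct sun4i_tfm_ctx),
 *		.cra_init		= sun4i_ss_cipher_init,
 *		.cra_ablkcipher = {
 *			.min_keysize	= AES_MIN_KEY_SIZE,
 *			.max_keysize	= AES_MAX_KEY_SIZE,
 *			.ivsize		= AES_BLOCK_SIZE,
 *			.setkey		= sun4i_ss_aes_setkey,
 *			.encrypt	= sun4i_ss_cbc_aes_encrypt,
 *			.decrypt	= sun4i_ss_cbc_aes_decrypt,
 *		},
 *	  },
 *	},
 */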

int sun4i_ss_cipher_init(struct crypto_tfm *tfm)
{
	struct sun4i_tfm_ctx *op = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct sun4i_ss_alg_template *algt;

	memset(op, 0, sizeof(struct sun4i_tfm_ctx));

	algt = container_of(alg, struct sun4i_ss_alg_template, alg.crypto);
	op->ss = algt->ss;

	tfm->crt_ablkcipher.reqsize = sizeof(struct sun4i_cipher_req_ctx);

	return 0;
}
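
/*
 * Setting crt_ablkcipher.reqsize above makes the crypto core reserve room
 * for a struct sun4i_cipher_req_ctx in every ablkcipher_request, which is
 * what ablkcipher_request_ctx() returns in the encrypt/decrypt handlers.
 */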

/* check and set the AES key, prepare the mode to be used */
int sun4i_ss_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	switch (keylen) {
	case 128 / 8:
		op->keymode = SS_AES_128BITS;
		break;
	case 192 / 8:
		op->keymode = SS_AES_192BITS;
		break;
	case 256 / 8:
		op->keymode = SS_AES_256BITS;
		break;
	default:
		dev_err(ss->dev, "ERROR: Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}

/* check and set the DES key, prepare the mode to be used */
int sun4i_ss_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;
	u32 flags;
	u32 tmp[DES_EXPKEY_WORDS];
	int ret;

	if (unlikely(keylen != DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	flags = crypto_ablkcipher_get_flags(tfm);

	ret = des_ekey(tmp, key);
	if (unlikely(ret == 0) && (flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
		dev_dbg(ss->dev, "Weak key %u\n", keylen);
		return -EINVAL;
	}

	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}
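
/*
 * Note on the weak-key check above: des_ekey() returns 0 when the supplied
 * key is one of the known weak DES keys. The key is only rejected when the
 * user asked for that by setting CRYPTO_TFM_REQ_WEAK_KEY, which matches the
 * behaviour of the generic software DES implementation.
 */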

/* check and set the 3DES key, prepare the mode to be used */
int sun4i_ss_des3_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct sun4i_tfm_ctx *op = crypto_ablkcipher_ctx(tfm);
	struct sun4i_ss_ctx *ss = op->ss;

	if (unlikely(keylen != 3 * DES_KEY_SIZE)) {
		dev_err(ss->dev, "Invalid keylen %u\n", keylen);
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	op->keylen = keylen;
	memcpy(op->key, key, keylen);
	return 0;
}