// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hash algorithms supported by the CESA: MD5, SHA1 and SHA256.
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 */

#include <crypto/hmac.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include "cesa.h"
struct mv_cesa_ahash_dma_iter {
	struct mv_cesa_dma_iter base;
	struct mv_cesa_sg_dma_iter src;
};
static inline void
mv_cesa_ahash_req_iter_init(struct mv_cesa_ahash_dma_iter *iter,
			    struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int len = req->nbytes + creq->cache_ptr;

	if (!creq->last_req)
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;

	mv_cesa_req_dma_iter_init(&iter->base, len);
	mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE);
	iter->src.op_offset = creq->cache_ptr;
}
static inline bool
mv_cesa_ahash_req_iter_next_op(struct mv_cesa_ahash_dma_iter *iter)
{
	iter->src.op_offset = 0;

	return mv_cesa_req_dma_iter_next_op(&iter->base);
}
static inline int
mv_cesa_ahash_dma_alloc_cache(struct mv_cesa_ahash_dma_req *req, gfp_t flags)
{
	req->cache = dma_pool_alloc(cesa_dev->dma->cache_pool, flags,
				    &req->cache_dma);
	if (!req->cache)
		return -ENOMEM;

	return 0;
}
static inline void
mv_cesa_ahash_dma_free_cache(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->cache)
		return;

	dma_pool_free(cesa_dev->dma->cache_pool, req->cache,
		      req->cache_dma);
}
static int mv_cesa_ahash_dma_alloc_padding(struct mv_cesa_ahash_dma_req *req,
					   gfp_t flags)
{
	if (req->padding)
		return 0;

	req->padding = dma_pool_alloc(cesa_dev->dma->padding_pool, flags,
				      &req->padding_dma);
	if (!req->padding)
		return -ENOMEM;

	return 0;
}
static void mv_cesa_ahash_dma_free_padding(struct mv_cesa_ahash_dma_req *req)
{
	if (!req->padding)
		return;

	dma_pool_free(cesa_dev->dma->padding_pool, req->padding,
		      req->padding_dma);
	req->padding = NULL;
}
static inline void mv_cesa_ahash_dma_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	mv_cesa_ahash_dma_free_padding(&creq->req.dma);
}
static inline void mv_cesa_ahash_dma_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);
	mv_cesa_ahash_dma_free_cache(&creq->req.dma);
	mv_cesa_dma_cleanup(&creq->base);
}
static inline void mv_cesa_ahash_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_cleanup(req);
}
static void mv_cesa_ahash_last_cleanup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_last_cleanup(req);
}
static int mv_cesa_ahash_pad_len(struct mv_cesa_ahash_req *creq)
{
	unsigned int index, padlen;

	index = creq->len & CESA_HASH_BLOCK_SIZE_MSK;
	padlen = (index < 56) ? (56 - index) : (64 + 56 - index);

	return padlen;
}
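/*
 * Worked example of the padding-length computation above: for
 * creq->len = 100 bytes, index = 100 & 63 = 36, so padlen = 56 - 36 = 20.
 * Together with the 8-byte length trailer appended by
 * mv_cesa_ahash_pad_req() this pads the message to the next 64-byte
 * block boundary (100 + 20 + 8 = 128), which is the classic
 * Merkle-Damgard "length congruent to 56 mod 64" rule shared by MD5,
 * SHA1 and SHA256.
 */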
static int mv_cesa_ahash_pad_req(struct mv_cesa_ahash_req *creq, u8 *buf)
{
	unsigned int padlen;

	buf[0] = 0x80;
	/* Pad out to 56 mod 64 */
	padlen = mv_cesa_ahash_pad_len(creq);
	memset(buf + 1, 0, padlen - 1);

	if (creq->algo_le) {
		__le64 bits = cpu_to_le64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	} else {
		__be64 bits = cpu_to_be64(creq->len << 3);

		memcpy(buf + padlen, &bits, sizeof(bits));
	}

	return padlen + 8;
}
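/*
 * Note on the length trailer: MD5 stores the 64-bit message bit-count
 * little endian, while SHA1/SHA256 store it big endian, which is why the
 * algo_le flag selects between cpu_to_le64() and cpu_to_be64() above.
 * For example, a 3-byte message (creq->len = 3) encodes 24 bits as
 * 18 00 00 00 00 00 00 00 for MD5 and as 00 00 00 00 00 00 00 18 for
 * the SHA variants.
 */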
static void mv_cesa_ahash_std_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;
	struct mv_cesa_engine *engine = creq->base.engine;
	struct mv_cesa_op_ctx *op;
	unsigned int new_cache_ptr = 0;
	u32 frag_mode;
	size_t len;
	unsigned int digsize;
	int i;

	mv_cesa_adjust_op(engine, &creq->op_tmpl);
	memcpy_toio(engine->sram, &creq->op_tmpl, sizeof(creq->op_tmpl));

	if (!sreq->offset) {
		digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
		for (i = 0; i < digsize / 4; i++)
			writel_relaxed(creq->state[i],
				       engine->regs + CESA_IVDIG(i));
	}

	if (creq->cache_ptr)
		memcpy_toio(engine->sram + CESA_SA_DATA_SRAM_OFFSET,
			    creq->cache, creq->cache_ptr);

	len = min_t(size_t, req->nbytes + creq->cache_ptr - sreq->offset,
		    CESA_SA_SRAM_PAYLOAD_SIZE);

	if (!creq->last_req) {
		new_cache_ptr = len & CESA_HASH_BLOCK_SIZE_MSK;
		len &= ~CESA_HASH_BLOCK_SIZE_MSK;
	}

	if (len - creq->cache_ptr)
		sreq->offset += sg_pcopy_to_buffer(req->src, creq->src_nents,
						   engine->sram +
						   CESA_SA_DATA_SRAM_OFFSET +
						   creq->cache_ptr,
						   len - creq->cache_ptr,
						   sreq->offset);

	op = &creq->op_tmpl;

	frag_mode = mv_cesa_get_op_cfg(op) & CESA_SA_DESC_CFG_FRAG_MSK;

	if (creq->last_req && sreq->offset == req->nbytes &&
	    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
		if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
			frag_mode = CESA_SA_DESC_CFG_NOT_FRAG;
		else if (frag_mode == CESA_SA_DESC_CFG_MID_FRAG)
			frag_mode = CESA_SA_DESC_CFG_LAST_FRAG;
	}

	if (frag_mode == CESA_SA_DESC_CFG_NOT_FRAG ||
	    frag_mode == CESA_SA_DESC_CFG_LAST_FRAG) {
		if (creq->last_req &&
		    creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX) {
			mv_cesa_set_mac_op_total_len(op, creq->len);
		} else {
			int trailerlen = mv_cesa_ahash_pad_len(creq) + 8;

			if (len + trailerlen > CESA_SA_SRAM_PAYLOAD_SIZE) {
				len &= CESA_HASH_BLOCK_SIZE_MSK;
				new_cache_ptr = 64 - trailerlen;
				memcpy_fromio(creq->cache,
					      engine->sram +
					      CESA_SA_DATA_SRAM_OFFSET + len,
					      new_cache_ptr);
			} else {
				i = mv_cesa_ahash_pad_req(creq, creq->cache);
				memcpy_toio(engine->sram + len +
					    CESA_SA_DATA_SRAM_OFFSET,
					    creq->cache, i);
				len += i;
			}

			if (frag_mode == CESA_SA_DESC_CFG_LAST_FRAG)
				frag_mode = CESA_SA_DESC_CFG_MID_FRAG;
			else
				frag_mode = CESA_SA_DESC_CFG_FIRST_FRAG;
		}
	}

	mv_cesa_set_mac_op_frag_len(op, len);
	mv_cesa_update_op_cfg(op, frag_mode, CESA_SA_DESC_CFG_FRAG_MSK);

	/* FIXME: only update enc_len field */
	memcpy_toio(engine->sram, op, sizeof(*op));

	if (frag_mode == CESA_SA_DESC_CFG_FIRST_FRAG)
		mv_cesa_update_op_cfg(op, CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->cache_ptr = new_cache_ptr;

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACCEL0_DONE);
	writel_relaxed(CESA_SA_CFG_PARA_DIS, engine->regs + CESA_SA_CFG);
	WARN_ON(readl(engine->regs + CESA_SA_CMD) &
		CESA_SA_CMD_EN_CESA_SA_ACCL0);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}
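/*
 * Fragment-mode walk-through (illustrative): a request that fits in one
 * SRAM payload runs as NOT_FRAG; otherwise the first chunk is issued as
 * FIRST_FRAG, intermediate chunks as MID_FRAG and the closing chunk as
 * LAST_FRAG. Between steps of a still-running request the template only
 * holds FIRST or MID, which is why the code above converts the mode on
 * the fly and flips FIRST to MID right after the first launch.
 */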
static int mv_cesa_ahash_std_process(struct ahash_request *req, u32 status)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	if (sreq->offset < (req->nbytes - creq->cache_ptr))
		return -EINPROGRESS;

	return 0;
}
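/*
 * Returning -EINPROGRESS here makes the core re-run
 * mv_cesa_ahash_std_step(), so a request larger than
 * CESA_SA_SRAM_PAYLOAD_SIZE is processed as a sequence of step/process
 * iterations until all source bytes (minus whatever remains in the
 * cache) have been fed to the engine.
 */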
static inline void mv_cesa_ahash_dma_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *basereq = &creq->base;

	mv_cesa_dma_prepare(basereq, basereq->engine);
}
static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_ahash_std_req *sreq = &creq->req.std;

	sreq->offset = 0;
}
static void mv_cesa_ahash_dma_step(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_req *base = &creq->base;

	/* We must explicitly set the digest state. */
	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
		struct mv_cesa_engine *engine = base->engine;
		int i;

		/* Set the hash state in the IVDIG regs. */
		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
			writel_relaxed(creq->state[i], engine->regs +
				       CESA_IVDIG(i));
	}

	mv_cesa_dma_step(base);
}
static void mv_cesa_ahash_step(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
}
static int mv_cesa_ahash_process(struct crypto_async_request *req, u32 status)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		return mv_cesa_dma_process(&creq->base, status);

	return mv_cesa_ahash_std_process(ahashreq, status);
}
static void mv_cesa_ahash_complete(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
	struct mv_cesa_engine *engine = creq->base.engine;
	unsigned int digsize;
	int i;

	digsize = crypto_ahash_digestsize(crypto_ahash_reqtfm(ahashreq));

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ &&
	    (creq->base.chain.last->flags & CESA_TDMA_TYPE_MSK) ==
	     CESA_TDMA_RESULT) {
		__le32 *data = NULL;

		/*
		 * Result is already in the correct endianness when the SA is
		 * used.
		 */
		data = creq->base.chain.last->op->ctx.hash.hash;
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = le32_to_cpu(data[i]);

		memcpy(ahashreq->result, data, digsize);
	} else {
		for (i = 0; i < digsize / 4; i++)
			creq->state[i] = readl_relaxed(engine->regs +
						       CESA_IVDIG(i));
		if (creq->last_req) {
			/*
			 * Hardware's MD5 digest is in little endian format, but
			 * SHA in big endian format
			 */
			if (creq->algo_le) {
				__le32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_le32(creq->state[i]);
			} else {
				__be32 *result = (void *)ahashreq->result;

				for (i = 0; i < digsize / 4; i++)
					result[i] = cpu_to_be32(creq->state[i]);
			}
		}
	}

	atomic_sub(ahashreq->nbytes, &engine->load);
}
static void mv_cesa_ahash_prepare(struct crypto_async_request *req,
				  struct mv_cesa_engine *engine)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	creq->base.engine = engine;

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
		mv_cesa_ahash_dma_prepare(ahashreq);
	else
		mv_cesa_ahash_std_prepare(ahashreq);
}
static void mv_cesa_ahash_req_cleanup(struct crypto_async_request *req)
{
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (creq->last_req)
		mv_cesa_ahash_last_cleanup(ahashreq);

	mv_cesa_ahash_cleanup(ahashreq);

	if (creq->cache_ptr)
		sg_pcopy_to_buffer(ahashreq->src, creq->src_nents,
				   creq->cache,
				   creq->cache_ptr,
				   ahashreq->nbytes - creq->cache_ptr);
}
static const struct mv_cesa_req_ops mv_cesa_ahash_req_ops = {
	.step = mv_cesa_ahash_step,
	.process = mv_cesa_ahash_process,
	.cleanup = mv_cesa_ahash_req_cleanup,
	.complete = mv_cesa_ahash_complete,
};
static void mv_cesa_ahash_init(struct ahash_request *req,
			       struct mv_cesa_op_ctx *tmpl, bool algo_le)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	memset(creq, 0, sizeof(*creq));
	mv_cesa_update_op_cfg(tmpl,
			      CESA_SA_DESC_CFG_OP_MAC_ONLY |
			      CESA_SA_DESC_CFG_FIRST_FRAG,
			      CESA_SA_DESC_CFG_OP_MSK |
			      CESA_SA_DESC_CFG_FRAG_MSK);
	mv_cesa_set_mac_op_total_len(tmpl, 0);
	mv_cesa_set_mac_op_frag_len(tmpl, 0);
	creq->op_tmpl = *tmpl;
	creq->len = 0;
	creq->algo_le = algo_le;
}
static inline int mv_cesa_ahash_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hash_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}
static bool mv_cesa_ahash_cache_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	bool cached = false;

	if (creq->cache_ptr + req->nbytes < CESA_MAX_HASH_BLOCK_SIZE &&
	    !creq->last_req) {
		cached = true;

		if (!req->nbytes)
			return cached;

		sg_pcopy_to_buffer(req->src, creq->src_nents,
				   creq->cache + creq->cache_ptr,
				   req->nbytes, 0);

		creq->cache_ptr += req->nbytes;
	}

	return cached;
}
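/*
 * Example of the caching behaviour (illustrative): with a 64-byte block
 * cipher block size (CESA_MAX_HASH_BLOCK_SIZE covers MD5/SHA1/SHA256),
 * three successive 10-byte updates are simply accumulated in creq->cache
 * and never touch the engine; only once cache_ptr + nbytes reaches a
 * full block, or the final request arrives, does the driver build a real
 * operation.
 */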
static struct mv_cesa_op_ctx *
mv_cesa_dma_add_frag(struct mv_cesa_tdma_chain *chain,
		     struct mv_cesa_op_ctx *tmpl, unsigned int frag_len,
		     gfp_t flags)
{
	struct mv_cesa_op_ctx *op;
	int ret;

	op = mv_cesa_dma_add_op(chain, tmpl, false, flags);
	if (IS_ERR(op))
		return op;

	/* Set the operation block fragment length. */
	mv_cesa_set_mac_op_frag_len(op, frag_len);

	/* Append dummy desc to launch operation */
	ret = mv_cesa_dma_add_dummy_launch(chain, flags);
	if (ret)
		return ERR_PTR(ret);

	if (mv_cesa_mac_op_is_first_frag(tmpl))
		mv_cesa_update_op_cfg(tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	return op;
}
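/*
 * Each fragment therefore contributes two TDMA descriptors: one that
 * copies the operation block (with its fragment length patched in) into
 * SRAM, and a dummy "launch" descriptor that kicks the engine. The
 * template is demoted from FIRST to MID so that every subsequent
 * fragment of the same request is tagged correctly.
 */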
static int
mv_cesa_ahash_dma_add_cache(struct mv_cesa_tdma_chain *chain,
			    struct mv_cesa_ahash_req *creq,
			    gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	int ret;

	if (!creq->cache_ptr)
		return 0;

	ret = mv_cesa_ahash_dma_alloc_cache(ahashdreq, flags);
	if (ret)
		return ret;

	memcpy(ahashdreq->cache, creq->cache, creq->cache_ptr);

	return mv_cesa_dma_add_data_transfer(chain,
					     CESA_SA_DATA_SRAM_OFFSET,
					     ahashdreq->cache_dma,
					     creq->cache_ptr,
					     CESA_TDMA_DST_IN_SRAM,
					     flags);
}
static struct mv_cesa_op_ctx *
mv_cesa_ahash_dma_last_req(struct mv_cesa_tdma_chain *chain,
			   struct mv_cesa_ahash_dma_iter *dma_iter,
			   struct mv_cesa_ahash_req *creq,
			   unsigned int frag_len, gfp_t flags)
{
	struct mv_cesa_ahash_dma_req *ahashdreq = &creq->req.dma;
	unsigned int len, trailerlen, padoff = 0;
	struct mv_cesa_op_ctx *op;
	int ret;

	/*
	 * If the transfer is smaller than our maximum length, and we have
	 * some data outstanding, we can ask the engine to finish the hash.
	 */
	if (creq->len <= CESA_SA_DESC_MAC_SRC_TOTAL_LEN_MAX && frag_len) {
		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len,
					  flags);
		if (IS_ERR(op))
			return op;

		mv_cesa_set_mac_op_total_len(op, creq->len);
		mv_cesa_update_op_cfg(op, mv_cesa_mac_op_is_first_frag(op) ?
						CESA_SA_DESC_CFG_NOT_FRAG :
						CESA_SA_DESC_CFG_LAST_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

		ret = mv_cesa_dma_add_result_op(chain,
						CESA_SA_CFG_SRAM_OFFSET,
						CESA_SA_DATA_SRAM_OFFSET,
						CESA_TDMA_SRC_IN_SRAM, flags);
		if (ret)
			return ERR_PTR(-ENOMEM);
		return op;
	}

	/*
	 * The request is longer than the engine can handle, or we have
	 * no data outstanding. Manually generate the padding, adding it
	 * as a "mid" fragment.
	 */
	ret = mv_cesa_ahash_dma_alloc_padding(ahashdreq, flags);
	if (ret)
		return ERR_PTR(ret);

	trailerlen = mv_cesa_ahash_pad_req(creq, ahashdreq->padding);

	len = min(CESA_SA_SRAM_PAYLOAD_SIZE - frag_len, trailerlen);
	if (len) {
		ret = mv_cesa_dma_add_data_transfer(chain,
						    CESA_SA_DATA_SRAM_OFFSET +
						    frag_len,
						    ahashdreq->padding_dma,
						    len, CESA_TDMA_DST_IN_SRAM,
						    flags);
		if (ret)
			return ERR_PTR(ret);

		op = mv_cesa_dma_add_frag(chain, &creq->op_tmpl, frag_len + len,
					  flags);
		if (IS_ERR(op))
			return op;

		if (len == trailerlen)
			return op;

		padoff += len;
	}

	ret = mv_cesa_dma_add_data_transfer(chain,
					    CESA_SA_DATA_SRAM_OFFSET,
					    ahashdreq->padding_dma +
					    padoff,
					    trailerlen - padoff,
					    CESA_TDMA_DST_IN_SRAM,
					    flags);
	if (ret)
		return ERR_PTR(ret);

	return mv_cesa_dma_add_frag(chain, &creq->op_tmpl, trailerlen - padoff,
				    flags);
}
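/*
 * Padding split example (illustrative): if frag_len leaves only 4 bytes
 * of room in the SRAM payload but the trailer is 72 bytes long (the
 * worst case, padlen 64 plus the 8-byte length), the first transfer
 * above pushes 4 padding bytes into the current fragment (padoff = 4)
 * and the remaining 68 bytes are queued as an extra "mid" fragment of
 * their own, sourced from ahashdreq->padding + padoff.
 */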
static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		      GFP_KERNEL : GFP_ATOMIC;
	struct mv_cesa_req *basereq = &creq->base;
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
		set_state = true;

	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
		if (!ret) {
			ret = -ENOMEM;
			goto err;
		}
	}

	mv_cesa_tdma_desc_iter_init(&basereq->chain);
	mv_cesa_ahash_req_iter_init(&iter, req);

	/*
	 * Add the cache (left-over data from a previous block) first.
	 * This will never overflow the SRAM size.
	 */
	ret = mv_cesa_ahash_dma_add_cache(&basereq->chain, creq, flags);
	if (ret)
		goto err_free_tdma;

	if (iter.src.sg) {
		/*
		 * Add all the new data, inserting an operation block and
		 * launch command between each full SRAM block-worth of
		 * data. We intentionally do not add the final op block.
		 */
		while (true) {
			ret = mv_cesa_dma_add_op_transfers(&basereq->chain,
							   &iter.base,
							   &iter.src, flags);
			if (ret)
				goto err_free_tdma;

			frag_len = iter.base.op_len;

			if (!mv_cesa_ahash_req_iter_next_op(&iter))
				break;

			op = mv_cesa_dma_add_frag(&basereq->chain,
						  &creq->op_tmpl,
						  frag_len, flags);
			if (IS_ERR(op)) {
				ret = PTR_ERR(op);
				goto err_free_tdma;
			}
		}
	} else {
		/* Account for the data that was in the cache. */
		frag_len = iter.base.op_len;
	}

	/*
	 * At this point, frag_len indicates whether we have any data
	 * outstanding which needs an operation. Queue up the final
	 * operation, which depends whether this is the final request.
	 */
	if (creq->last_req)
		op = mv_cesa_ahash_dma_last_req(&basereq->chain, &iter, creq,
						frag_len, flags);
	else if (frag_len)
		op = mv_cesa_dma_add_frag(&basereq->chain, &creq->op_tmpl,
					  frag_len, flags);

	if (IS_ERR(op)) {
		ret = PTR_ERR(op);
		goto err_free_tdma;
	}

	/*
	 * If results are copied via DMA, this means that this
	 * request can be directly processed by the engine,
	 * without partial updates. So we can chain it at the
	 * DMA level with other requests.
	 */
	type = basereq->chain.last->flags & CESA_TDMA_TYPE_MSK;

	if (op && type != CESA_TDMA_RESULT) {
		/* Add dummy desc to wait for crypto operation end */
		ret = mv_cesa_dma_add_dummy_end(&basereq->chain, flags);
		if (ret)
			goto err_free_tdma;
	}

	if (!creq->last_req)
		creq->cache_ptr = req->nbytes + creq->cache_ptr -
				  iter.base.len;
	else
		creq->cache_ptr = 0;

	basereq->chain.last->flags |= CESA_TDMA_END_OF_REQ;

	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

	if (set_state) {
		/*
		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
		 * let the step logic know that the IVDIG registers should be
		 * explicitly set before launching a TDMA chain.
		 */
		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
	}

	return 0;

err_free_tdma:
	mv_cesa_dma_cleanup(basereq);
	dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, DMA_TO_DEVICE);

err:
	mv_cesa_ahash_last_cleanup(req);

	return ret;
}
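/*
 * Resulting TDMA chain layout (illustrative) for a multi-fragment update:
 *
 *   [cache -> SRAM copy] [data transfers] [op + dummy launch]   (frag 1)
 *                        [data transfers] [op + dummy launch]   (frag 2)
 *                        ...              [final op] [dummy end]
 *
 * The last descriptor carries CESA_TDMA_END_OF_REQ, and BREAK_CHAIN is
 * set unless the result is fetched by DMA (CESA_TDMA_RESULT), in which
 * case the request can be chained to the next one at the DMA level.
 */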
static int mv_cesa_ahash_req_init(struct ahash_request *req, bool *cached)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->src_nents = sg_nents_for_len(req->src, req->nbytes);
	if (creq->src_nents < 0) {
		dev_err(cesa_dev->dev, "Invalid number of src SG");
		return creq->src_nents;
	}

	*cached = mv_cesa_ahash_cache_req(req);

	if (*cached)
		return 0;

	if (cesa_dev->caps->has_tdma)
		return mv_cesa_ahash_dma_req_init(req);
	else
		return 0;
}
static int mv_cesa_ahash_queue_req(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_engine *engine;
	bool cached = false;
	int ret;

	ret = mv_cesa_ahash_req_init(req, &cached);
	if (ret)
		return ret;

	if (cached)
		return 0;

	engine = mv_cesa_select_engine(req->nbytes);
	mv_cesa_ahash_prepare(&req->base, engine);

	ret = mv_cesa_queue_req(&req->base, &creq->base);

	if (mv_cesa_req_needs_cleanup(&req->base, ret))
		mv_cesa_ahash_cleanup(req);

	return ret;
}
static int mv_cesa_ahash_update(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);

	creq->len += req->nbytes;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_final(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;
	req->nbytes = 0;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_finup(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx *tmpl = &creq->op_tmpl;

	creq->len += req->nbytes;
	mv_cesa_set_mac_op_total_len(tmpl, creq->len);
	creq->last_req = true;

	return mv_cesa_ahash_queue_req(req);
}
static int mv_cesa_ahash_export(struct ahash_request *req, void *hash,
				u64 *len, void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;

	blocksize = crypto_ahash_blocksize(ahash);

	*len = creq->len;
	memcpy(hash, creq->state, digsize);
	memset(cache, 0, blocksize);
	memcpy(cache, creq->cache, creq->cache_ptr);

	return 0;
}
static int mv_cesa_ahash_import(struct ahash_request *req, const void *hash,
				u64 len, const void *cache)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	unsigned int digsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize;
	unsigned int cache_ptr;
	int ret;

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	blocksize = crypto_ahash_blocksize(ahash);
	if (len >= blocksize)
		mv_cesa_update_op_cfg(&creq->op_tmpl,
				      CESA_SA_DESC_CFG_MID_FRAG,
				      CESA_SA_DESC_CFG_FRAG_MSK);

	creq->len = len;
	memcpy(creq->state, hash, digsize);
	creq->cache_ptr = 0;

	cache_ptr = do_div(len, blocksize);
	if (!cache_ptr)
		return 0;

	memcpy(creq->cache, cache, cache_ptr);
	creq->cache_ptr = cache_ptr;

	return 0;
}
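/*
 * do_div() example (illustrative): for len = 150 and blocksize = 64,
 * do_div(len, blocksize) returns the remainder 22 and rewrites len to 2
 * in place, so cache_ptr ends up holding the number of bytes that did
 * not make it into a full block and must be replayed from the cache.
 */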
static int mv_cesa_md5_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_MD5);

	mv_cesa_ahash_init(req, &tmpl, true);

	creq->state[0] = MD5_H0;
	creq->state[1] = MD5_H1;
	creq->state[2] = MD5_H2;
	creq->state[3] = MD5_H3;

	return 0;
}
static int mv_cesa_md5_export(struct ahash_request *req, void *out)
{
	struct md5_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->hash,
				    &out_state->byte_count, out_state->block);
}
static int mv_cesa_md5_import(struct ahash_request *req, const void *in)
{
	const struct md5_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->hash, in_state->byte_count,
				    in_state->block);
}
static int mv_cesa_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_md5_alg = {
	.init = mv_cesa_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_md5_digest,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "md5",
			.cra_driver_name = "mv-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA1);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA1_H0;
	creq->state[1] = SHA1_H1;
	creq->state[2] = SHA1_H2;
	creq->state[3] = SHA1_H3;
	creq->state[4] = SHA1_H4;

	return 0;
}
static int mv_cesa_sha1_export(struct ahash_request *req, void *out)
{
	struct sha1_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buffer);
}
static int mv_cesa_sha1_import(struct ahash_request *req, const void *in)
{
	const struct sha1_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buffer);
}
static int mv_cesa_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_sha1_alg = {
	.init = mv_cesa_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha1_digest,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "sha1",
			.cra_driver_name = "mv-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_SHA256);

	mv_cesa_ahash_init(req, &tmpl, false);

	creq->state[0] = SHA256_H0;
	creq->state[1] = SHA256_H1;
	creq->state[2] = SHA256_H2;
	creq->state[3] = SHA256_H3;
	creq->state[4] = SHA256_H4;
	creq->state[5] = SHA256_H5;
	creq->state[6] = SHA256_H6;
	creq->state[7] = SHA256_H7;

	return 0;
}
static int mv_cesa_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
static int mv_cesa_sha256_export(struct ahash_request *req, void *out)
{
	struct sha256_state *out_state = out;

	return mv_cesa_ahash_export(req, out_state->state, &out_state->count,
				    out_state->buf);
}
static int mv_cesa_sha256_import(struct ahash_request *req, const void *in)
{
	const struct sha256_state *in_state = in;

	return mv_cesa_ahash_import(req, in_state->state, in_state->count,
				    in_state->buf);
}
struct ahash_alg mv_sha256_alg = {
	.init = mv_cesa_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_sha256_digest,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "sha256",
			.cra_driver_name = "mv-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hash_ctx),
			.cra_init = mv_cesa_ahash_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
struct mv_cesa_ahash_result {
	struct completion completion;
	int error;
};
static void mv_cesa_hmac_ahash_complete(struct crypto_async_request *req,
					int error)
{
	struct mv_cesa_ahash_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
static int mv_cesa_ahmac_iv_state_init(struct ahash_request *req, u8 *pad,
				       void *state, unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   mv_cesa_hmac_ahash_complete, &result);
	sg_init_one(&sg, pad, blocksize);
	ahash_request_set_crypt(req, &sg, pad, blocksize);
	init_completion(&result.completion);

	ret = crypto_ahash_init(req);
	if (ret)
		return ret;

	ret = crypto_ahash_update(req);
	if (ret && ret != -EINPROGRESS)
		return ret;

	wait_for_completion_interruptible(&result.completion);
	if (result.error)
		return result.error;

	ret = crypto_ahash_export(req, state);
	if (ret)
		return ret;

	return 0;
}
static int mv_cesa_ahmac_pad_init(struct ahash_request *req,
				  const u8 *key, unsigned int keylen,
				  u8 *ipad, u8 *opad,
				  unsigned int blocksize)
{
	struct mv_cesa_ahash_result result;
	struct scatterlist sg;
	int ret;
	int i;

	if (keylen <= blocksize) {
		memcpy(ipad, key, keylen);
	} else {
		u8 *keydup = kmemdup(key, keylen, GFP_KERNEL);

		if (!keydup)
			return -ENOMEM;

		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
					   mv_cesa_hmac_ahash_complete,
					   &result);
		sg_init_one(&sg, keydup, keylen);
		ahash_request_set_crypt(req, &sg, ipad, keylen);
		init_completion(&result.completion);

		ret = crypto_ahash_digest(req);
		if (ret == -EINPROGRESS) {
			wait_for_completion_interruptible(&result.completion);
			ret = result.error;
		}

		/* Set the memory region to 0 to avoid any leak. */
		kfree_sensitive(keydup);

		if (ret)
			return ret;

		keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
	}

	memset(ipad + keylen, 0, blocksize - keylen);
	memcpy(opad, ipad, blocksize);

	for (i = 0; i < blocksize; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}

	return 0;
}
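/*
 * This is the standard RFC 2104 key schedule: the (possibly pre-hashed)
 * key is zero-padded to the block size, then XORed with 0x36 into every
 * ipad byte and with 0x5c into every opad byte (HMAC_IPAD_VALUE and
 * HMAC_OPAD_VALUE from <crypto/hmac.h>), yielding the two block-sized
 * prefixes of HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
 */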
static int mv_cesa_ahmac_setkey(const char *hash_alg_name,
				const u8 *key, unsigned int keylen,
				void *istate, void *ostate)
{
	struct ahash_request *req;
	struct crypto_ahash *tfm;
	unsigned int blocksize;
	u8 *ipad = NULL;
	u8 *opad;
	int ret;

	tfm = crypto_alloc_ahash(hash_alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto free_ahash;
	}

	crypto_ahash_clear_flags(tfm, ~0);

	blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

	ipad = kcalloc(2, blocksize, GFP_KERNEL);
	if (!ipad) {
		ret = -ENOMEM;
		goto free_req;
	}

	opad = ipad + blocksize;

	ret = mv_cesa_ahmac_pad_init(req, key, keylen, ipad, opad, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, ipad, istate, blocksize);
	if (ret)
		goto free_ipad;

	ret = mv_cesa_ahmac_iv_state_init(req, opad, ostate, blocksize);

free_ipad:
	kfree(ipad);
free_req:
	ahash_request_free(req);
free_ahash:
	crypto_free_ahash(tfm);

	return ret;
}
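/*
 * In other words, setkey runs the key-dependent half of HMAC exactly
 * once: it hashes the ipad and opad blocks through the engine's own
 * "mv-*" ahash and exports the two partial states. The per-tfm iv
 * buffer filled by the callers below then seeds the operation context
 * (tmpl.ctx.hash.iv), so each HMAC request only costs the inner and
 * outer message hashes, never another key pass.
 */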
static int mv_cesa_ahmac_cra_init(struct crypto_tfm *tfm)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->base.ops = &mv_cesa_ahash_req_ops;

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct mv_cesa_ahash_req));

	return 0;
}
static int mv_cesa_ahmac_md5_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_MD5);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, true);

	return 0;
}
static int mv_cesa_ahmac_md5_setkey(struct crypto_ahash *tfm, const u8 *key,
				    unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct md5_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-md5", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.hash); i++)
		ctx->iv[i] = cpu_to_be32(istate.hash[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.hash); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.hash[i]);

	return 0;
}
static int mv_cesa_ahmac_md5_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_md5_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_md5_alg = {
	.init = mv_cesa_ahmac_md5_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_md5_digest,
	.setkey = mv_cesa_ahmac_md5_setkey,
	.export = mv_cesa_md5_export,
	.import = mv_cesa_md5_import,
	.halg = {
		.digestsize = MD5_DIGEST_SIZE,
		.statesize = sizeof(struct md5_state),
		.base = {
			.cra_name = "hmac(md5)",
			.cra_driver_name = "mv-hmac-md5",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_ahmac_sha1_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA1);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_ahmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
				     unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha1_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha1", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}
static int mv_cesa_ahmac_sha1_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha1_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha1_alg = {
	.init = mv_cesa_ahmac_sha1_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha1_digest,
	.setkey = mv_cesa_ahmac_sha1_setkey,
	.export = mv_cesa_sha1_export,
	.import = mv_cesa_sha1_import,
	.halg = {
		.digestsize = SHA1_DIGEST_SIZE,
		.statesize = sizeof(struct sha1_state),
		.base = {
			.cra_name = "hmac(sha1)",
			.cra_driver_name = "mv-hmac-sha1",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA1_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
static int mv_cesa_ahmac_sha256_setkey(struct crypto_ahash *tfm, const u8 *key,
				       unsigned int keylen)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct sha256_state istate, ostate;
	int ret, i;

	ret = mv_cesa_ahmac_setkey("mv-sha256", key, keylen, &istate, &ostate);
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(istate.state); i++)
		ctx->iv[i] = cpu_to_be32(istate.state[i]);

	for (i = 0; i < ARRAY_SIZE(ostate.state); i++)
		ctx->iv[i + 8] = cpu_to_be32(ostate.state[i]);

	return 0;
}
static int mv_cesa_ahmac_sha256_init(struct ahash_request *req)
{
	struct mv_cesa_hmac_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct mv_cesa_op_ctx tmpl = { };

	mv_cesa_set_op_cfg(&tmpl, CESA_SA_DESC_CFG_MACM_HMAC_SHA256);
	memcpy(tmpl.ctx.hash.iv, ctx->iv, sizeof(ctx->iv));

	mv_cesa_ahash_init(req, &tmpl, false);

	return 0;
}
static int mv_cesa_ahmac_sha256_digest(struct ahash_request *req)
{
	int ret;

	ret = mv_cesa_ahmac_sha256_init(req);
	if (ret)
		return ret;

	return mv_cesa_ahash_finup(req);
}
struct ahash_alg mv_ahmac_sha256_alg = {
	.init = mv_cesa_ahmac_sha256_init,
	.update = mv_cesa_ahash_update,
	.final = mv_cesa_ahash_final,
	.finup = mv_cesa_ahash_finup,
	.digest = mv_cesa_ahmac_sha256_digest,
	.setkey = mv_cesa_ahmac_sha256_setkey,
	.export = mv_cesa_sha256_export,
	.import = mv_cesa_sha256_import,
	.halg = {
		.digestsize = SHA256_DIGEST_SIZE,
		.statesize = sizeof(struct sha256_state),
		.base = {
			.cra_name = "hmac(sha256)",
			.cra_driver_name = "mv-hmac-sha256",
			.cra_priority = 300,
			.cra_flags = CRYPTO_ALG_ASYNC |
				     CRYPTO_ALG_ALLOCATES_MEMORY |
				     CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = SHA256_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct mv_cesa_hmac_ctx),
			.cra_init = mv_cesa_ahmac_cra_init,
			.cra_module = THIS_MODULE,
		}
	}
};
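/*
 * Usage sketch (illustrative only, not part of the driver): once these
 * ahash_alg instances are registered by the CESA probe code, a kernel
 * user reaches them through the generic crypto API. Assuming this driver
 * wins priority selection, something like the following computes a
 * SHA256 digest on the engine:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	struct scatterlist sg;
 *	u8 digest[SHA256_DIGEST_SIZE];
 *
 *	sg_init_one(&sg, data, datalen);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, &done_ctx);
 *	ahash_request_set_crypt(req, &sg, digest, datalen);
 *	crypto_ahash_digest(req);	(may return -EINPROGRESS)
 *
 * done_cb/done_ctx and the error handling are hypothetical and left out;
 * the synchronous wait pattern used by mv_cesa_ahmac_pad_init() above is
 * a complete in-tree example of the same flow.
 */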