linux.git: drivers/crypto/inside-secure/safexcel_hash.c
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <[email protected]>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

#include "safexcel.h"

struct safexcel_ahash_ctx {
        struct safexcel_context base;
        struct safexcel_crypto_priv *priv;

        u32 alg;
        u32 digest;

        u32 ipad[SHA1_DIGEST_SIZE / sizeof(u32)];
        u32 opad[SHA1_DIGEST_SIZE / sizeof(u32)];
};

struct safexcel_ahash_req {
        bool last_req;
        bool finish;
        bool hmac;
        bool needs_inv;

        int nents;

        u8 state_sz;    /* expected state size, only set once */
        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)] __aligned(sizeof(u32));

        u64 len;
        u64 processed;

        u8 cache[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
        u8 cache_next[SHA256_BLOCK_SIZE] __aligned(sizeof(u32));
};

struct safexcel_ahash_export_state {
        u64 len;
        u64 processed;

        u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
        u8 cache[SHA256_BLOCK_SIZE];
};

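/*
 * Build the token accompanying a hash command descriptor: instruction 0
 * directs input_length bytes of packet data to the hash engine, and
 * instruction 1 inserts the resulting digest (result_length bytes) into
 * the output, marking the end of both the hash and the packet.
 */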
static void safexcel_hash_token(struct safexcel_command_desc *cdesc,
                                u32 input_length, u32 result_length)
{
        struct safexcel_token *token =
                (struct safexcel_token *)cdesc->control_data.token;

        token[0].opcode = EIP197_TOKEN_OPCODE_DIRECTION;
        token[0].packet_length = input_length;
        token[0].stat = EIP197_TOKEN_STAT_LAST_HASH;
        token[0].instructions = EIP197_TOKEN_INS_TYPE_HASH;

        token[1].opcode = EIP197_TOKEN_OPCODE_INSERT;
        token[1].packet_length = result_length;
        token[1].stat = EIP197_TOKEN_STAT_LAST_HASH |
                        EIP197_TOKEN_STAT_LAST_PACKET;
        token[1].instructions = EIP197_TOKEN_INS_TYPE_OUTPUT |
                                EIP197_TOKEN_INS_INSERT_HASH_DIGEST;
}

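/*
 * Fill in the control words of the first command descriptor: algorithm,
 * digest type and context size. The CONTEXT_CONTROL_SIZE() values count
 * 32-bit context words: 6 for SHA-1 (5 state words + 1 digest count),
 * 9 for SHA-224/SHA-256 (8 state words + 1 digest count) and 10 for
 * HMAC-SHA1 (5 ipad + 5 opad words). When continuing a partial hash, the
 * current state and the number of processed blocks are copied into the
 * context record; otherwise the engine is told to restart the hash.
 */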
static void safexcel_context_control(struct safexcel_ahash_ctx *ctx,
                                     struct safexcel_ahash_req *req,
                                     struct safexcel_command_desc *cdesc,
                                     unsigned int digestsize,
                                     unsigned int blocksize)
{
        int i;

        cdesc->control_data.control0 |= CONTEXT_CONTROL_TYPE_HASH_OUT;
        cdesc->control_data.control0 |= ctx->alg;
        cdesc->control_data.control0 |= ctx->digest;

        if (ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED) {
                if (req->processed) {
                        if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(6);
                        else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224 ||
                                 ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(9);

                        cdesc->control_data.control1 |= CONTEXT_CONTROL_DIGEST_CNT;
                } else {
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_RESTART_HASH;
                }

                if (!req->finish)
                        cdesc->control_data.control0 |= CONTEXT_CONTROL_NO_FINISH_HASH;

                /*
                 * Copy the input digest if needed, and set up the context
                 * fields. Do this now as we need it to set up the first
                 * command descriptor.
                 */
                if (req->processed) {
                        for (i = 0; i < digestsize / sizeof(u32); i++)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->state[i]);

                        if (req->finish)
                                ctx->base.ctxr->data[i] = cpu_to_le32(req->processed / blocksize);
                }
        } else if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC) {
                cdesc->control_data.control0 |= CONTEXT_CONTROL_SIZE(10);

                memcpy(ctx->base.ctxr->data, ctx->ipad, digestsize);
                memcpy(ctx->base.ctxr->data + digestsize / sizeof(u32),
                       ctx->opad, digestsize);
        }
}

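/*
 * Handle the result of a regular hash request: pop and check the result
 * descriptor, copy the digest back to the caller if this was the final
 * request, release the DMA mappings, and carry the data cached for the
 * next block over from cache_next to cache.
 */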
static int safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *sreq = ahash_request_ctx(areq);
        int cache_len;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: result: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: result: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (sreq->finish)
                memcpy(areq->result, sreq->state,
                       crypto_ahash_digestsize(ahash));

        if (sreq->nents) {
                dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
                sreq->nents = 0;
        }

        safexcel_free_context(priv, async, sreq->state_sz);

        cache_len = sreq->len - sreq->processed;
        if (cache_len)
                memcpy(sreq->cache, sreq->cache_next, cache_len);

        *should_complete = true;

        return 1;
}

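/*
 * Send a hash request to the engine: emit one command descriptor for the
 * data cached by previous update() calls (if any), then one per source
 * scatterlist entry, plus a single result descriptor receiving the
 * updated state. Unless this is the last request, data that does not fill
 * a whole block is copied to cache_next and kept for a later call.
 */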
static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring,
                                   struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_crypto_priv *priv = ctx->priv;
        struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
        struct safexcel_result_desc *rdesc;
        struct scatterlist *sg;
        int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;

        queued = len = req->len - req->processed;
        if (queued < crypto_ahash_blocksize(ahash))
                cache_len = queued;
        else
                cache_len = queued - areq->nbytes;

        if (!req->last_req) {
                /* If this is not the last request and the queued data does not
                 * fit into full blocks, cache it for the next send() call.
                 */
                extra = queued & (crypto_ahash_blocksize(ahash) - 1);
                if (!extra)
                        /* If this is not the last request and the queued data
                         * is a multiple of the block size, cache the last
                         * block for now.
                         */
                        extra = queued - crypto_ahash_blocksize(ahash);

                if (extra) {
                        sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                           req->cache_next, extra,
                                           areq->nbytes - extra);

                        queued -= extra;
                        len -= extra;

                        if (!queued) {
                                *commands = 0;
                                *results = 0;
                                return 0;
                        }
                }
        }

        spin_lock_bh(&priv->ring[ring].egress_lock);

        /* Add a command descriptor for the cached data, if any */
        if (cache_len) {
                ctx->base.cache = kzalloc(cache_len, EIP197_GFP_FLAGS(*async));
                if (!ctx->base.cache) {
                        ret = -ENOMEM;
                        goto unlock;
                }
                memcpy(ctx->base.cache, req->cache, cache_len);
                ctx->base.cache_dma = dma_map_single(priv->dev, ctx->base.cache,
                                                     cache_len, DMA_TO_DEVICE);
                if (dma_mapping_error(priv->dev, ctx->base.cache_dma)) {
                        ret = -EINVAL;
                        goto free_cache;
                }

                ctx->base.cache_sz = cache_len;
                first_cdesc = safexcel_add_cdesc(priv, ring, 1,
                                                 (cache_len == len),
                                                 ctx->base.cache_dma,
                                                 cache_len, len,
                                                 ctx->base.ctxr_dma);
                if (IS_ERR(first_cdesc)) {
                        ret = PTR_ERR(first_cdesc);
                        goto unmap_cache;
                }
                n_cdesc++;

                queued -= cache_len;
                if (!queued)
                        goto send_command;
        }

        /* Now handle the current ahash request buffer(s) */
        req->nents = dma_map_sg(priv->dev, areq->src,
                                sg_nents_for_len(areq->src, areq->nbytes),
                                DMA_TO_DEVICE);
        if (!req->nents) {
                ret = -ENOMEM;
                goto cdesc_rollback;
        }

        for_each_sg(areq->src, sg, req->nents, i) {
                int sglen = sg_dma_len(sg);

                /* Do not overflow the request */
                if (queued - sglen < 0)
                        sglen = queued;

                cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
                                           !(queued - sglen), sg_dma_address(sg),
                                           sglen, len, ctx->base.ctxr_dma);
                if (IS_ERR(cdesc)) {
                        ret = PTR_ERR(cdesc);
                        goto cdesc_rollback;
                }
                n_cdesc++;

                if (n_cdesc == 1)
                        first_cdesc = cdesc;

                queued -= sglen;
                if (!queued)
                        break;
        }

send_command:
        /* Set up the context options */
        safexcel_context_control(ctx, req, first_cdesc, req->state_sz,
                                 crypto_ahash_blocksize(ahash));

        /* Add the token */
        safexcel_hash_token(first_cdesc, len, req->state_sz);

        ctx->base.result_dma = dma_map_single(priv->dev, req->state,
                                              req->state_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(priv->dev, ctx->base.result_dma)) {
                ret = -EINVAL;
                goto cdesc_rollback;
        }

        /* Add a result descriptor */
        rdesc = safexcel_add_rdesc(priv, ring, 1, 1, ctx->base.result_dma,
                                   req->state_sz);
        if (IS_ERR(rdesc)) {
                ret = PTR_ERR(rdesc);
                goto cdesc_rollback;
        }

        spin_unlock_bh(&priv->ring[ring].egress_lock);

        req->processed += len;
        request->req = &areq->base;

        *commands = n_cdesc;
        *results = 1;
        return 0;

cdesc_rollback:
        for (i = 0; i < n_cdesc; i++)
                safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
unmap_cache:
        if (ctx->base.cache_dma) {
                dma_unmap_single(priv->dev, ctx->base.cache_dma,
                                 ctx->base.cache_sz, DMA_TO_DEVICE);
                ctx->base.cache_sz = 0;
        }
free_cache:
        kfree(ctx->base.cache);
        ctx->base.cache = NULL;

unlock:
        spin_unlock_bh(&priv->ring[ring].egress_lock);
        return ret;
}

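/*
 * An invalidation is needed when the state stored in the hardware context
 * record no longer matches what the request expects: compare the saved
 * state words and the processed-blocks counter stored right after them.
 */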
static inline bool safexcel_ahash_needs_inv_get(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        unsigned int state_w_sz = req->state_sz / sizeof(u32);
        int i;

        for (i = 0; i < state_w_sz; i++)
                if (ctx->base.ctxr->data[i] != cpu_to_le32(req->state[i]))
                        return true;

        if (ctx->base.ctxr->data[state_w_sz] !=
            cpu_to_le32(req->processed / crypto_ahash_blocksize(ahash)))
                return true;

        return false;
}

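/*
 * Handle the result of a context invalidation request. If the tfm is
 * being torn down (exit_inv), free the context record; otherwise pick a
 * (possibly new) ring and re-enqueue the request which triggered the
 * invalidation.
 */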
static int safexcel_handle_inv_result(struct safexcel_crypto_priv *priv,
                                      int ring,
                                      struct crypto_async_request *async,
                                      bool *should_complete, int *ret)
{
        struct safexcel_result_desc *rdesc;
        struct ahash_request *areq = ahash_request_cast(async);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(ahash);
        int enq_ret;

        *ret = 0;

        spin_lock_bh(&priv->ring[ring].egress_lock);
        rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
        if (IS_ERR(rdesc)) {
                dev_err(priv->dev,
                        "hash: invalidate: could not retrieve the result descriptor\n");
                *ret = PTR_ERR(rdesc);
        } else if (rdesc->result_data.error_code) {
                dev_err(priv->dev,
                        "hash: invalidate: result descriptor error (%d)\n",
                        rdesc->result_data.error_code);
                *ret = -EINVAL;
        }

        safexcel_complete(priv, ring);
        spin_unlock_bh(&priv->ring[ring].egress_lock);

        if (ctx->base.exit_inv) {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);

                *should_complete = true;
                return 1;
        }

        ring = safexcel_select_ring(priv);
        ctx->base.ring = ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        enq_ret = crypto_enqueue_request(&priv->ring[ring].queue, async);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        if (enq_ret != -EINPROGRESS)
                *ret = enq_ret;

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        *should_complete = false;

        return 1;
}

static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
                                  struct crypto_async_request *async,
                                  bool *should_complete, int *ret)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int err;

        BUG_ON(priv->version == EIP97 && req->needs_inv);

        if (req->needs_inv) {
                req->needs_inv = false;
                err = safexcel_handle_inv_result(priv, ring, async,
                                                 should_complete, ret);
        } else {
                err = safexcel_handle_req_result(priv, ring, async,
                                                 should_complete, ret);
        }

        return err;
}

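/* Queue an invalidation command for this request's context record. */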
static int safexcel_ahash_send_inv(struct crypto_async_request *async,
                                   int ring, struct safexcel_request *request,
                                   int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        int ret;

        ret = safexcel_invalidate_cache(async, ctx->priv,
                                        ctx->base.ctxr_dma, ring, request);
        if (unlikely(ret))
                return ret;

        *commands = 1;
        *results = 1;

        return 0;
}

static int safexcel_ahash_send(struct crypto_async_request *async,
                               int ring, struct safexcel_request *request,
                               int *commands, int *results)
{
        struct ahash_request *areq = ahash_request_cast(async);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        int ret;

        if (req->needs_inv)
                ret = safexcel_ahash_send_inv(async, ring, request,
                                              commands, results);
        else
                ret = safexcel_ahash_send_req(async, ring, request,
                                              commands, results);
        return ret;
}

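/*
 * Synchronously invalidate the context record when a tfm exits: build a
 * dummy request on the stack, flag it as an invalidation request, enqueue
 * it and wait for its completion.
 */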
static int safexcel_ahash_exit_inv(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        AHASH_REQUEST_ON_STACK(req, __crypto_ahash_cast(tfm));
        struct safexcel_ahash_req *rctx = ahash_request_ctx(req);
        struct safexcel_inv_result result = {};
        int ring = ctx->base.ring;

        memset(req, 0, sizeof(struct ahash_request));

        /* create invalidation request */
        init_completion(&result.completion);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_inv_complete, &result);

        ahash_request_set_tfm(req, __crypto_ahash_cast(tfm));
        ctx = crypto_tfm_ctx(req->base.tfm);
        ctx->base.exit_inv = true;
        rctx->needs_inv = true;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        crypto_enqueue_request(&priv->ring[ring].queue, &req->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        wait_for_completion_interruptible(&result.completion);

        if (result.error) {
                dev_warn(priv->dev, "hash: completion error (%d)\n",
                         result.error);
                return result.error;
        }

        return 0;
}

/* safexcel_ahash_cache: cache data until at least one request can be sent to
 * the engine, i.e. when there is at least one block size worth of data in
 * the pipe.
 */
static int safexcel_ahash_cache(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        int queued, cache_len;

        /* cache_len: everything accepted by the driver but not sent yet,
         * i.e. total size handled by update() - last request size - total
         * size handled by send()
         */
        cache_len = req->len - areq->nbytes - req->processed;
        /* queued: everything accepted by the driver which will be handled
         * by the next send() calls,
         * i.e. total size handled by update() - total size handled by send()
         */
        queued = req->len - req->processed;

        /*
         * If there aren't enough bytes to proceed (less than a block size),
         * cache the data until we have enough.
         */
        if (cache_len + areq->nbytes <= crypto_ahash_blocksize(ahash)) {
                sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
                                   req->cache + cache_len,
                                   areq->nbytes, 0);
                return areq->nbytes;
        }

        /* We couldn't cache all the data */
        return -E2BIG;
}

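/*
 * Queue a request on its ring and kick the ring's worker. On the first
 * request of a tfm, the per-tfm context record is allocated from the DMA
 * pool; on later requests (EIP197 only), check whether the record must be
 * invalidated before it can be reused.
 */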
static int safexcel_ahash_enqueue(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret, ring;

        req->needs_inv = false;

        if (ctx->base.ctxr) {
                if (priv->version == EIP197 &&
                    !ctx->base.needs_inv && req->processed &&
                    ctx->digest == CONTEXT_CONTROL_DIGEST_PRECOMPUTED)
                        /* We're still setting needs_inv here, even though it is
                         * cleared right away, because the needs_inv flag can be
                         * set in other functions and we want to keep the same
                         * logic.
                         */
                        ctx->base.needs_inv = safexcel_ahash_needs_inv_get(areq);

                if (ctx->base.needs_inv) {
                        ctx->base.needs_inv = false;
                        req->needs_inv = true;
                }
        } else {
                ctx->base.ring = safexcel_select_ring(priv);
                ctx->base.ctxr = dma_pool_zalloc(priv->context_pool,
                                                 EIP197_GFP_FLAGS(areq->base),
                                                 &ctx->base.ctxr_dma);
                if (!ctx->base.ctxr)
                        return -ENOMEM;
        }

        ring = ctx->base.ring;

        spin_lock_bh(&priv->ring[ring].queue_lock);
        ret = crypto_enqueue_request(&priv->ring[ring].queue, &areq->base);
        spin_unlock_bh(&priv->ring[ring].queue_lock);

        queue_work(priv->ring[ring].workqueue,
                   &priv->ring[ring].work_data.work);

        return ret;
}

static int safexcel_ahash_update(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

        /* If the request is 0 length, do nothing */
        if (!areq->nbytes)
                return 0;

        req->len += areq->nbytes;

        safexcel_ahash_cache(areq);

        /*
         * We're not doing partial updates when performing an HMAC request.
         * Everything will be handled by the final() call.
         */
        if (ctx->digest == CONTEXT_CONTROL_DIGEST_HMAC)
                return 0;

        if (req->hmac)
                return safexcel_ahash_enqueue(areq);

        if (!req->last_req &&
            req->len - req->processed > crypto_ahash_blocksize(ahash))
                return safexcel_ahash_enqueue(areq);

        return 0;
}

static int safexcel_ahash_final(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        req->last_req = true;
        req->finish = true;

        /* If we have an overall 0 length request */
        if (!(req->len + areq->nbytes)) {
                if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA1)
                        memcpy(areq->result, sha1_zero_message_hash,
                               SHA1_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA224)
                        memcpy(areq->result, sha224_zero_message_hash,
                               SHA224_DIGEST_SIZE);
                else if (ctx->alg == CONTEXT_CONTROL_CRYPTO_ALG_SHA256)
                        memcpy(areq->result, sha256_zero_message_hash,
                               SHA256_DIGEST_SIZE);

                return 0;
        }

        return safexcel_ahash_enqueue(areq);
}

static int safexcel_ahash_finup(struct ahash_request *areq)
{
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        req->last_req = true;
        req->finish = true;

        safexcel_ahash_update(areq);
        return safexcel_ahash_final(areq);
}

static int safexcel_ahash_export(struct ahash_request *areq, void *out)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        struct safexcel_ahash_export_state *export = out;

        export->len = req->len;
        export->processed = req->processed;

        memcpy(export->state, req->state, req->state_sz);
        memcpy(export->cache, req->cache, crypto_ahash_blocksize(ahash));

        return 0;
}

static int safexcel_ahash_import(struct ahash_request *areq, const void *in)
{
        struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);
        const struct safexcel_ahash_export_state *export = in;
        int ret;

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req->len = export->len;
        req->processed = export->processed;

        memcpy(req->cache, export->cache, crypto_ahash_blocksize(ahash));
        memcpy(req->state, export->state, req->state_sz);

        return 0;
}

static int safexcel_ahash_cra_init(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_alg_template *tmpl =
                container_of(__crypto_ahash_alg(tfm->__crt_alg),
                             struct safexcel_alg_template, alg.ahash);

        ctx->priv = tmpl->priv;
        ctx->base.send = safexcel_ahash_send;
        ctx->base.handle_result = safexcel_handle_result;

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct safexcel_ahash_req));
        return 0;
}

static int safexcel_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA1_H0;
        req->state[1] = SHA1_H1;
        req->state[2] = SHA1_H2;
        req->state[3] = SHA1_H3;
        req->state[4] = SHA1_H4;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA1;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA1_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

static void safexcel_ahash_cra_exit(struct crypto_tfm *tfm)
{
        struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct safexcel_crypto_priv *priv = ctx->priv;
        int ret;

        /* context not allocated, skip invalidation */
        if (!ctx->base.ctxr)
                return;

        if (priv->version == EIP197) {
                ret = safexcel_ahash_exit_inv(tfm);
                if (ret)
                        dev_warn(priv->dev, "hash: invalidation error %d\n", ret);
        } else {
                dma_pool_free(priv->context_pool, ctx->base.ctxr,
                              ctx->base.ctxr_dma);
        }
}

struct safexcel_alg_template safexcel_alg_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha1_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "safexcel-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_hmac_sha1_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));

        safexcel_sha1_init(areq);
        ctx->digest = CONTEXT_CONTROL_DIGEST_HMAC;
        return 0;
}

static int safexcel_hmac_sha1_digest(struct ahash_request *areq)
{
        int ret = safexcel_hmac_sha1_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_ahash_result {
        struct completion completion;
        int error;
};

static void safexcel_ahash_complete(struct crypto_async_request *req, int error)
{
        struct safexcel_ahash_result *result = req->data;

        if (error == -EINPROGRESS)
                return;

        result->error = error;
        complete(&result->completion);
}

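/*
 * Compute the HMAC inner and outer pads as defined by RFC 2104: a key
 * longer than a block is first hashed using the supplied ahash request,
 * then the key is zero-padded to the block size and XORed with 0x36
 * (HMAC_IPAD_VALUE) and 0x5c (HMAC_OPAD_VALUE) respectively.
 */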
static int safexcel_hmac_init_pad(struct ahash_request *areq,
                                  unsigned int blocksize, const u8 *key,
                                  unsigned int keylen, u8 *ipad, u8 *opad)
{
        struct safexcel_ahash_result result;
        struct scatterlist sg;
        int ret, i;
        u8 *keydup;

        if (keylen <= blocksize) {
                memcpy(ipad, key, keylen);
        } else {
                keydup = kmemdup(key, keylen, GFP_KERNEL);
                if (!keydup)
                        return -ENOMEM;

                ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                           safexcel_ahash_complete, &result);
                sg_init_one(&sg, keydup, keylen);
                ahash_request_set_crypt(areq, &sg, ipad, keylen);
                init_completion(&result.completion);

                ret = crypto_ahash_digest(areq);
                if (ret == -EINPROGRESS) {
                        wait_for_completion_interruptible(&result.completion);
                        ret = result.error;
                }

                /* Avoid leaking the key material */
                memzero_explicit(keydup, keylen);
                kfree(keydup);

                if (ret)
                        return ret;

                keylen = crypto_ahash_digestsize(crypto_ahash_reqtfm(areq));
        }

        memset(ipad + keylen, 0, blocksize - keylen);
        memcpy(opad, ipad, blocksize);

        for (i = 0; i < blocksize; i++) {
                ipad[i] ^= HMAC_IPAD_VALUE;
                opad[i] ^= HMAC_OPAD_VALUE;
        }

        return 0;
}

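/*
 * Run one block-sized pad through the hash and export the resulting
 * intermediate state: this precomputes the digest state of ipad/opad so
 * the engine can later start HMAC operations directly from it.
 */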
static int safexcel_hmac_init_iv(struct ahash_request *areq,
                                 unsigned int blocksize, u8 *pad, void *state)
{
        struct safexcel_ahash_result result;
        struct safexcel_ahash_req *req;
        struct scatterlist sg;
        int ret;

        ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   safexcel_ahash_complete, &result);
        sg_init_one(&sg, pad, blocksize);
        ahash_request_set_crypt(areq, &sg, pad, blocksize);
        init_completion(&result.completion);

        ret = crypto_ahash_init(areq);
        if (ret)
                return ret;

        req = ahash_request_ctx(areq);
        req->hmac = true;
        req->last_req = true;

        ret = crypto_ahash_update(areq);
        if (ret && ret != -EINPROGRESS && ret != -EBUSY)
                return ret;

        wait_for_completion_interruptible(&result.completion);
        if (result.error)
                return result.error;

        return crypto_ahash_export(areq, state);
}

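/*
 * Derive the precomputed inner/outer digest states for an HMAC key:
 * allocate a transform for the underlying hash (alg), build ipad/opad
 * from the key, then hash one block of each and export the intermediate
 * states into istate/ostate.
 */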
static int safexcel_hmac_setkey(const char *alg, const u8 *key,
                                unsigned int keylen, void *istate, void *ostate)
{
        struct ahash_request *areq;
        struct crypto_ahash *tfm;
        unsigned int blocksize;
        u8 *ipad, *opad;
        int ret;

        tfm = crypto_alloc_ahash(alg, CRYPTO_ALG_TYPE_AHASH,
                                 CRYPTO_ALG_TYPE_AHASH_MASK);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        areq = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!areq) {
                ret = -ENOMEM;
                goto free_ahash;
        }

        crypto_ahash_clear_flags(tfm, ~0);
        blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));

        ipad = kzalloc(2 * blocksize, GFP_KERNEL);
        if (!ipad) {
                ret = -ENOMEM;
                goto free_request;
        }

        opad = ipad + blocksize;

        ret = safexcel_hmac_init_pad(areq, blocksize, key, keylen, ipad, opad);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, ipad, istate);
        if (ret)
                goto free_ipad;

        ret = safexcel_hmac_init_iv(areq, blocksize, opad, ostate);

free_ipad:
        kfree(ipad);
free_request:
        ahash_request_free(areq);
free_ahash:
        crypto_free_ahash(tfm);

        return ret;
}

951
952 static int safexcel_hmac_sha1_setkey(struct crypto_ahash *tfm, const u8 *key,
953                                      unsigned int keylen)
954 {
955         struct safexcel_ahash_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
956         struct safexcel_crypto_priv *priv = ctx->priv;
957         struct safexcel_ahash_export_state istate, ostate;
958         int ret, i;
959
960         ret = safexcel_hmac_setkey("safexcel-sha1", key, keylen, &istate, &ostate);
961         if (ret)
962                 return ret;
963
964         if (priv->version == EIP197 && ctx->base.ctxr) {
965                 for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(u32); i++) {
966                         if (ctx->ipad[i] != le32_to_cpu(istate.state[i]) ||
967                             ctx->opad[i] != le32_to_cpu(ostate.state[i])) {
968                                 ctx->base.needs_inv = true;
969                                 break;
970                         }
971                 }
972         }
973
974         memcpy(ctx->ipad, &istate.state, SHA1_DIGEST_SIZE);
975         memcpy(ctx->opad, &ostate.state, SHA1_DIGEST_SIZE);
976
977         return 0;
978 }
979
struct safexcel_alg_template safexcel_alg_hmac_sha1 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_hmac_sha1_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_hmac_sha1_digest,
                .setkey = safexcel_hmac_sha1_setkey,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA1_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "safexcel-hmac-sha1",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha256_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA256_H0;
        req->state[1] = SHA256_H1;
        req->state[2] = SHA256_H2;
        req->state[3] = SHA256_H3;
        req->state[4] = SHA256_H4;
        req->state[5] = SHA256_H5;
        req->state[6] = SHA256_H6;
        req->state[7] = SHA256_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA256;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha256_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha256_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha256 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha256_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha256_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA256_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "safexcel-sha256",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};

static int safexcel_sha224_init(struct ahash_request *areq)
{
        struct safexcel_ahash_ctx *ctx = crypto_ahash_ctx(crypto_ahash_reqtfm(areq));
        struct safexcel_ahash_req *req = ahash_request_ctx(areq);

        memset(req, 0, sizeof(*req));

        req->state[0] = SHA224_H0;
        req->state[1] = SHA224_H1;
        req->state[2] = SHA224_H2;
        req->state[3] = SHA224_H3;
        req->state[4] = SHA224_H4;
        req->state[5] = SHA224_H5;
        req->state[6] = SHA224_H6;
        req->state[7] = SHA224_H7;

        ctx->alg = CONTEXT_CONTROL_CRYPTO_ALG_SHA224;
        ctx->digest = CONTEXT_CONTROL_DIGEST_PRECOMPUTED;
        /* SHA-224 shares SHA-256's full-size internal state */
        req->state_sz = SHA256_DIGEST_SIZE;

        return 0;
}

static int safexcel_sha224_digest(struct ahash_request *areq)
{
        int ret = safexcel_sha224_init(areq);

        if (ret)
                return ret;

        return safexcel_ahash_finup(areq);
}

struct safexcel_alg_template safexcel_alg_sha224 = {
        .type = SAFEXCEL_ALG_TYPE_AHASH,
        .alg.ahash = {
                .init = safexcel_sha224_init,
                .update = safexcel_ahash_update,
                .final = safexcel_ahash_final,
                .finup = safexcel_ahash_finup,
                .digest = safexcel_sha224_digest,
                .export = safexcel_ahash_export,
                .import = safexcel_ahash_import,
                .halg = {
                        .digestsize = SHA224_DIGEST_SIZE,
                        .statesize = sizeof(struct safexcel_ahash_export_state),
                        .base = {
                                .cra_name = "sha224",
                                .cra_driver_name = "safexcel-sha224",
                                .cra_priority = 300,
                                .cra_flags = CRYPTO_ALG_ASYNC |
                                             CRYPTO_ALG_KERN_DRIVER_ONLY,
                                .cra_blocksize = SHA224_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct safexcel_ahash_ctx),
                                .cra_init = safexcel_ahash_cra_init,
                                .cra_exit = safexcel_ahash_cra_exit,
                                .cra_module = THIS_MODULE,
                        },
                },
        },
};