/* linux.git: drivers/crypto/ux500/hash/hash_core.c */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Cryptographic API.
4  * Support for Nomadik hardware crypto engine.
5
6  * Copyright (C) ST-Ericsson SA 2010
7  * Author: Shujuan Chen <[email protected]> for ST-Ericsson
8  * Author: Joakim Bech <[email protected]> for ST-Ericsson
9  * Author: Berne Hebark <[email protected]> for ST-Ericsson.
10  * Author: Niklas Hernaeus <[email protected]> for ST-Ericsson.
11  * Author: Andreas Westin <[email protected]> for ST-Ericsson.
12  */
13
14 #define pr_fmt(fmt) "hashX hashX: " fmt
15
16 #include <linux/clk.h>
17 #include <linux/device.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/io.h>
22 #include <linux/klist.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/mod_devicetable.h>
26 #include <linux/platform_device.h>
27 #include <linux/crypto.h>
28
29 #include <linux/regulator/consumer.h>
30 #include <linux/dmaengine.h>
31 #include <linux/bitops.h>
32
33 #include <crypto/internal/hash.h>
34 #include <crypto/sha.h>
35 #include <crypto/scatterwalk.h>
36 #include <crypto/algapi.h>
37
38 #include <linux/platform_data/crypto-ux500.h>
39
40 #include "hash_alg.h"
41
/*
 * Data-path selection for transfers to the hash engine, set at module load:
 * 0 = CPU (PIO) mode, 1 = DMA mode (see HASH_MODE_DMA checks below).
 */
static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
45
/*
 * HMAC-SHA1, no key: pre-computed digest of the empty message, returned by
 * get_empty_message_digest() so the hardware never has to process a
 * zero-length input in HMAC mode.
 */
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
	0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
	0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
	0x70, 0x69, 0x0e, 0x1d
};
52
/*
 * HMAC-SHA256, no key: pre-computed digest of the empty message, returned by
 * get_empty_message_digest() so the hardware never has to process a
 * zero-length input in HMAC mode.
 */
static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
	0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
	0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
	0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
	0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
60
/**
 * struct hash_driver_data - data specific to the driver.
 *
 * @device_list:        A list of registered devices to choose from
 *                      (iterated by hash_get_device_data()).
 * @device_allocation:  A semaphore initialized with the number of devices;
 *                      taken in hash_get_device_data() and released in
 *                      release_hash_device().
 */
struct hash_driver_data {
	struct klist            device_list;
	struct semaphore        device_allocation;
};

/* Single module-wide instance shared by all contexts. */
static struct hash_driver_data  driver_data;
73
74 /* Declaration of functions */
75 /**
76  * hash_messagepad - Pads a message and write the nblw bits.
77  * @device_data:        Structure for the hash device.
78  * @message:            Last word of a message
79  * @index_bytes:        The number of bytes in the last message
80  *
81  * This function manages the final part of the digest calculation, when less
82  * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
83  *
84  */
85 static void hash_messagepad(struct hash_device_data *device_data,
86                             const u32 *message, u8 index_bytes);
87
88 /**
89  * release_hash_device - Releases a previously allocated hash device.
90  * @device_data:        Structure for the hash device.
91  *
92  */
93 static void release_hash_device(struct hash_device_data *device_data)
94 {
95         spin_lock(&device_data->ctx_lock);
96         device_data->current_ctx->device = NULL;
97         device_data->current_ctx = NULL;
98         spin_unlock(&device_data->ctx_lock);
99
100         /*
101          * The down_interruptible part for this semaphore is called in
102          * cryp_get_device_data.
103          */
104         up(&driver_data.device_allocation);
105 }
106
107 static void hash_dma_setup_channel(struct hash_device_data *device_data,
108                                    struct device *dev)
109 {
110         struct hash_platform_data *platform_data = dev->platform_data;
111         struct dma_slave_config conf = {
112                 .direction = DMA_MEM_TO_DEV,
113                 .dst_addr = device_data->phybase + HASH_DMA_FIFO,
114                 .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
115                 .dst_maxburst = 16,
116         };
117
118         dma_cap_zero(device_data->dma.mask);
119         dma_cap_set(DMA_SLAVE, device_data->dma.mask);
120
121         device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
122         device_data->dma.chan_mem2hash =
123                 dma_request_channel(device_data->dma.mask,
124                                     platform_data->dma_filter,
125                                     device_data->dma.cfg_mem2hash);
126
127         dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
128
129         init_completion(&device_data->dma.complete);
130 }
131
132 static void hash_dma_callback(void *data)
133 {
134         struct hash_ctx *ctx = data;
135
136         complete(&ctx->device->dma.complete);
137 }
138
/**
 * hash_set_dma_transfer - Map an sg list and submit a mem-to-device DMA job.
 * @ctx:        The hash context owning the device and DMA state.
 * @sg:         Scatterlist with the data to feed into the hash engine.
 * @len:        Length in bytes (currently unused here; the sg list and
 *              ctx->device->dma.nents drive the mapping).
 * @direction:  Must be DMA_TO_DEVICE; anything else is rejected.
 *
 * Returns 0 on success, -EFAULT on invalid direction, mapping failure or
 * descriptor preparation failure.
 */
static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
				 int len, enum dma_data_direction direction)
{
	struct dma_async_tx_descriptor *desc = NULL;
	struct dma_chan *channel = NULL;

	/* This driver only ever streams data INTO the hash engine. */
	if (direction != DMA_TO_DEVICE) {
		dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
			__func__);
		return -EFAULT;
	}

	/*
	 * NOTE(review): this rounds the caller's last-entry length UP to the
	 * DMA alignment, mutating the sg entry in place — presumably safe
	 * because the buffer is padded; confirm against callers.
	 */
	sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

	channel = ctx->device->dma.chan_mem2hash;
	ctx->device->dma.sg = sg;
	/* nents was computed earlier (hash_get_nents) and stashed in dma state. */
	ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
			ctx->device->dma.sg, ctx->device->dma.nents,
			direction);

	if (!ctx->device->dma.sg_len) {
		dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
			__func__);
		return -EFAULT;
	}

	dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
		__func__);
	desc = dmaengine_prep_slave_sg(channel,
			ctx->device->dma.sg, ctx->device->dma.sg_len,
			DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(ctx->device->dev,
			"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
		return -EFAULT;
	}

	/* Completion is signalled through hash_dma_callback -> dma.complete. */
	desc->callback = hash_dma_callback;
	desc->callback_param = ctx;

	dmaengine_submit(desc);
	dma_async_issue_pending(channel);

	return 0;
}
184
185 static void hash_dma_done(struct hash_ctx *ctx)
186 {
187         struct dma_chan *chan;
188
189         chan = ctx->device->dma.chan_mem2hash;
190         dmaengine_terminate_all(chan);
191         dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
192                      ctx->device->dma.sg_len, DMA_TO_DEVICE);
193 }
194
195 static int hash_dma_write(struct hash_ctx *ctx,
196                           struct scatterlist *sg, int len)
197 {
198         int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
199         if (error) {
200                 dev_dbg(ctx->device->dev,
201                         "%s: hash_set_dma_transfer() failed\n", __func__);
202                 return error;
203         }
204
205         return len;
206 }
207
208 /**
209  * get_empty_message_digest - Returns a pre-calculated digest for
210  * the empty message.
211  * @device_data:        Structure for the hash device.
212  * @zero_hash:          Buffer to return the empty message digest.
213  * @zero_hash_size:     Hash size of the empty message digest.
214  * @zero_digest:        True if zero_digest returned.
215  */
216 static int get_empty_message_digest(
217                 struct hash_device_data *device_data,
218                 u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
219 {
220         int ret = 0;
221         struct hash_ctx *ctx = device_data->current_ctx;
222         *zero_digest = false;
223
224         /**
225          * Caller responsible for ctx != NULL.
226          */
227
228         if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
229                 if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
230                         memcpy(zero_hash, &sha1_zero_message_hash[0],
231                                SHA1_DIGEST_SIZE);
232                         *zero_hash_size = SHA1_DIGEST_SIZE;
233                         *zero_digest = true;
234                 } else if (HASH_ALGO_SHA256 ==
235                                 ctx->config.algorithm) {
236                         memcpy(zero_hash, &sha256_zero_message_hash[0],
237                                SHA256_DIGEST_SIZE);
238                         *zero_hash_size = SHA256_DIGEST_SIZE;
239                         *zero_digest = true;
240                 } else {
241                         dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
242                                 __func__);
243                         ret = -EINVAL;
244                         goto out;
245                 }
246         } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
247                 if (!ctx->keylen) {
248                         if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
249                                 memcpy(zero_hash, &zero_message_hmac_sha1[0],
250                                        SHA1_DIGEST_SIZE);
251                                 *zero_hash_size = SHA1_DIGEST_SIZE;
252                                 *zero_digest = true;
253                         } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
254                                 memcpy(zero_hash, &zero_message_hmac_sha256[0],
255                                        SHA256_DIGEST_SIZE);
256                                 *zero_hash_size = SHA256_DIGEST_SIZE;
257                                 *zero_digest = true;
258                         } else {
259                                 dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
260                                         __func__);
261                                 ret = -EINVAL;
262                                 goto out;
263                         }
264                 } else {
265                         dev_dbg(device_data->dev,
266                                 "%s: Continue hash calculation, since hmac key available\n",
267                                 __func__);
268                 }
269         }
270 out:
271
272         return ret;
273 }
274
/**
 * hash_disable_power - Request to disable power and clock.
 * @device_data:        Structure for the hash device.
 * @save_device_state:  If true, saves the current hw state.
 *
 * This function request for disabling power (regulator) and clock,
 * and could also save current hw state.
 *
 * Returns 0 on success (or if power was already off), else the error from
 * regulator_disable().
 */
static int hash_disable_power(struct hash_device_data *device_data,
			      bool save_device_state)
{
	int ret = 0;
	struct device *dev = device_data->dev;

	/* power_state_lock serializes power_state with hash_enable_power(). */
	spin_lock(&device_data->power_state_lock);
	if (!device_data->power_state)
		goto out;	/* already powered down, nothing to do */

	/* Must snapshot the hw registers while clock/power are still up. */
	if (save_device_state) {
		hash_save_state(device_data,
				&device_data->state);
		device_data->restore_dev_state = true;
	}

	clk_disable(device_data->clk);
	ret = regulator_disable(device_data->regulator);
	if (ret)
		dev_err(dev, "%s: regulator_disable() failed!\n", __func__);

	/* Marked off even if regulator_disable() failed (original behavior). */
	device_data->power_state = false;

out:
	spin_unlock(&device_data->power_state_lock);

	return ret;
}
311
312 /**
313  * hash_enable_power - Request to enable power and clock.
314  * @device_data:                Structure for the hash device.
315  * @restore_device_state:       If true, restores a previous saved hw state.
316  *
317  * This function request for enabling power (regulator) and clock,
318  * and could also restore a previously saved hw state.
319  */
320 static int hash_enable_power(struct hash_device_data *device_data,
321                              bool restore_device_state)
322 {
323         int ret = 0;
324         struct device *dev = device_data->dev;
325
326         spin_lock(&device_data->power_state_lock);
327         if (!device_data->power_state) {
328                 ret = regulator_enable(device_data->regulator);
329                 if (ret) {
330                         dev_err(dev, "%s: regulator_enable() failed!\n",
331                                 __func__);
332                         goto out;
333                 }
334                 ret = clk_enable(device_data->clk);
335                 if (ret) {
336                         dev_err(dev, "%s: clk_enable() failed!\n", __func__);
337                         ret = regulator_disable(
338                                         device_data->regulator);
339                         goto out;
340                 }
341                 device_data->power_state = true;
342         }
343
344         if (device_data->restore_dev_state) {
345                 if (restore_device_state) {
346                         device_data->restore_dev_state = false;
347                         hash_resume_state(device_data, &device_data->state);
348                 }
349         }
350 out:
351         spin_unlock(&device_data->power_state_lock);
352
353         return ret;
354 }
355
/**
 * hash_get_device_data - Checks for an available hash device and return it.
 * @ctx:                Structure for the hash context.
 * @device_data:        Structure for the hash device.
 *
 * This function check for an available hash device and return it to
 * the caller.
 * Note! Caller need to release the device, calling up().
 *
 * Returns 0 with *device_data set on success, a -EINTR-style error if the
 * wait was interrupted, or -EBUSY if (unexpectedly) no free device exists.
 */
static int hash_get_device_data(struct hash_ctx *ctx,
				struct hash_device_data **device_data)
{
	int                     ret;
	struct klist_iter       device_iterator;
	struct klist_node       *device_node;
	struct hash_device_data *local_device_data = NULL;

	/* Wait until a device is available */
	ret = down_interruptible(&driver_data.device_allocation);
	if (ret)
		return ret;  /* Interrupted */

	/* Select a device */
	klist_iter_init(&driver_data.device_list, &device_iterator);
	device_node = klist_next(&device_iterator);
	while (device_node) {
		local_device_data = container_of(device_node,
					   struct hash_device_data, list_node);
		spin_lock(&local_device_data->ctx_lock);
		/* current_ctx allocates a device, NULL = unallocated */
		if (local_device_data->current_ctx) {
			/* Busy: drop the lock (below) and try the next node. */
			device_node = klist_next(&device_iterator);
		} else {
			/* Claim the device while still holding its lock. */
			local_device_data->current_ctx = ctx;
			ctx->device = local_device_data;
			spin_unlock(&local_device_data->ctx_lock);
			break;
		}
		spin_unlock(&local_device_data->ctx_lock);
	}
	klist_iter_exit(&device_iterator);

	if (!device_node) {
		/**
		 * No free device found.
		 * Since we allocated a device with down_interruptible, this
		 * should not be able to happen.
		 * Number of available devices, which are contained in
		 * device_allocation, is therefore decremented by not doing
		 * an up(device_allocation).
		 */
		return -EBUSY;
	}

	*device_data = local_device_data;

	return 0;
}
414
/**
 * hash_hw_write_key - Writes the key to the hardware registries.
 *
 * @device_data:        Structure for the hash device.
 * @key:                Key to be written.
 * @keylen:             The length of the key.
 *
 * Note! This function DOES NOT write to the NBLW registry, even though
 * specified in the the hw design spec. Either due to incorrect info in the
 * spec or due to a bug in the hw.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
			      const u8 *key, unsigned int keylen)
{
	u32 word = 0;
	int nwords = 1;

	/* Reset the number-of-valid-bits-in-last-word field first. */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Feed the key one 32-bit word at a time. */
	while (keylen >= 4) {
		/*
		 * NOTE(review): casts u8* to u32* — assumes the key buffer is
		 * 4-byte aligned (or the platform tolerates unaligned loads);
		 * confirm against callers.
		 */
		u32 *key_word = (u32 *)key;

		HASH_SET_DIN(key_word, nwords);
		keylen -= 4;
		key += 4;
	}

	/* Take care of the remaining bytes in the last word */
	if (keylen) {
		word = 0;
		/* Pack remaining bytes little-endian into one word. */
		while (keylen) {
			word |= (key[keylen - 1] << (8 * (keylen - 1)));
			keylen--;
		}

		HASH_SET_DIN(&word, nwords);
	}

	/* Wait for any ongoing digest calculation to finish. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* Start the key processing, then wait for it to complete. */
	HASH_SET_DCAL;

	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}
461
462 /**
463  * init_hash_hw - Initialise the hash hardware for a new calculation.
464  * @device_data:        Structure for the hash device.
465  * @ctx:                The hash context.
466  *
467  * This function will enable the bits needed to clear and start a new
468  * calculation.
469  */
470 static int init_hash_hw(struct hash_device_data *device_data,
471                         struct hash_ctx *ctx)
472 {
473         int ret = 0;
474
475         ret = hash_setconfiguration(device_data, &ctx->config);
476         if (ret) {
477                 dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
478                         __func__);
479                 return ret;
480         }
481
482         hash_begin(device_data, ctx);
483
484         if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
485                 hash_hw_write_key(device_data, ctx->key, ctx->keylen);
486
487         return ret;
488 }
489
490 /**
491  * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
492  *
493  * @sg:         Scatterlist.
494  * @size:       Size in bytes.
495  * @aligned:    True if sg data aligned to work in DMA mode.
496  *
497  */
498 static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
499 {
500         int nents = 0;
501         bool aligned_data = true;
502
503         while (size > 0 && sg) {
504                 nents++;
505                 size -= sg->length;
506
507                 /* hash_set_dma_transfer will align last nent */
508                 if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
509                     (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
510                         aligned_data = false;
511
512                 sg = sg_next(sg);
513         }
514
515         if (aligned)
516                 *aligned = aligned_data;
517
518         if (size != 0)
519                 return -EFAULT;
520
521         return nents;
522 }
523
524 /**
525  * hash_dma_valid_data - checks for dma valid sg data.
526  * @sg:         Scatterlist.
527  * @datasize:   Datasize in bytes.
528  *
529  * NOTE! This function checks for dma valid sg data, since dma
530  * only accept datasizes of even wordsize.
531  */
532 static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
533 {
534         bool aligned;
535
536         /* Need to include at least one nent, else error */
537         if (hash_get_nents(sg, datasize, &aligned) < 1)
538                 return false;
539
540         return aligned;
541 }
542
543 /**
544  * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
545  * @req: The hash request for the job.
546  *
547  * Initialize structures.
548  */
549 static int ux500_hash_init(struct ahash_request *req)
550 {
551         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
552         struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
553         struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
554
555         if (!ctx->key)
556                 ctx->keylen = 0;
557
558         memset(&req_ctx->state, 0, sizeof(struct hash_state));
559         req_ctx->updated = 0;
560         if (hash_mode == HASH_MODE_DMA) {
561                 if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
562                         req_ctx->dma_mode = false; /* Don't use DMA */
563
564                         pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
565                                  __func__, HASH_DMA_ALIGN_SIZE);
566                 } else {
567                         if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
568                             hash_dma_valid_data(req->src, req->nbytes)) {
569                                 req_ctx->dma_mode = true;
570                         } else {
571                                 req_ctx->dma_mode = false;
572                                 pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
573                                          __func__,
574                                          HASH_DMA_PERFORMANCE_MIN_SIZE);
575                         }
576                 }
577         }
578         return 0;
579 }
580
/**
 * hash_processblock - This function processes a single block of 512 bits (64
 *                     bytes), word aligned, starting at message.
 * @device_data:        Structure for the hash device.
 * @message:            Block (512 bits) of message to be written to
 *                      the HASH hardware.
 * @length:             Length of the block in bytes (a multiple of the
 *                      hardware word size; callers pass HASH_BLOCK_SIZE).
 */
static void hash_processblock(struct hash_device_data *device_data,
			      const u32 *message, int length)
{
	/* Convert byte length to 32-bit word count for HASH_SET_DIN. */
	int len = length / HASH_BYTES_PER_WORD;
	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/*
	 * Write message data to the HASH_DIN register.
	 */
	HASH_SET_DIN(message, len);
}
603
/**
 * hash_messagepad - Pads a message and write the nblw bits.
 * @device_data:        Structure for the hash device.
 * @message:            Last word of a message.
 * @index_bytes:        The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 *
 */
static void hash_messagepad(struct hash_device_data *device_data,
			    const u32 *message, u8 index_bytes)
{
	int nwords = 1;

	/*
	 * Clear hash str register, only clear NBLW
	 * since DCAL will be reset by hardware.
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

	/* Main loop */
	while (index_bytes >= 4) {
		HASH_SET_DIN(message, nwords);
		index_bytes -= 4;
		message++;
	}

	/* Write the trailing partial word, if any; NBLW (set below) tells
	 * the hardware how many of its bits are valid. */
	if (index_bytes)
		HASH_SET_DIN(message, nwords);

	/* Wait until the engine is ready before programming NBLW/DCAL. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
	HASH_SET_NBLW(index_bytes * 8);
	dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
	/* Kick off the final digest calculation. */
	HASH_SET_DCAL;
	dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
		__func__, readl_relaxed(&device_data->base->din),
		readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);

	/* Busy-wait for the digest calculation to complete. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();
}
651
652 /**
653  * hash_incrementlength - Increments the length of the current message.
654  * @ctx: Hash context
655  * @incr: Length of message processed already
656  *
657  * Overflow cannot occur, because conditions for overflow are checked in
658  * hash_hw_update.
659  */
660 static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
661 {
662         ctx->state.length.low_word += incr;
663
664         /* Check for wrap-around */
665         if (ctx->state.length.low_word < incr)
666                 ctx->state.length.high_word++;
667 }
668
669 /**
670  * hash_setconfiguration - Sets the required configuration for the hash
671  *                         hardware.
672  * @device_data:        Structure for the hash device.
673  * @config:             Pointer to a configuration structure.
674  */
675 int hash_setconfiguration(struct hash_device_data *device_data,
676                           struct hash_config *config)
677 {
678         int ret = 0;
679
680         if (config->algorithm != HASH_ALGO_SHA1 &&
681             config->algorithm != HASH_ALGO_SHA256)
682                 return -EPERM;
683
684         /*
685          * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
686          * to be written to HASH_DIN is considered as 32 bits.
687          */
688         HASH_SET_DATA_FORMAT(config->data_format);
689
690         /*
691          * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256
692          */
693         switch (config->algorithm) {
694         case HASH_ALGO_SHA1:
695                 HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
696                 break;
697
698         case HASH_ALGO_SHA256:
699                 HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
700                 break;
701
702         default:
703                 dev_err(device_data->dev, "%s: Incorrect algorithm\n",
704                         __func__);
705                 return -EPERM;
706         }
707
708         /*
709          * MODE bit. This bit selects between HASH or HMAC mode for the
710          * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
711          */
712         if (HASH_OPER_MODE_HASH == config->oper_mode)
713                 HASH_CLEAR_BITS(&device_data->base->cr,
714                                 HASH_CR_MODE_MASK);
715         else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
716                 HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
717                 if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
718                         /* Truncate key to blocksize */
719                         dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
720                         HASH_SET_BITS(&device_data->base->cr,
721                                       HASH_CR_LKEY_MASK);
722                 } else {
723                         dev_dbg(device_data->dev, "%s: LKEY cleared\n",
724                                 __func__);
725                         HASH_CLEAR_BITS(&device_data->base->cr,
726                                         HASH_CR_LKEY_MASK);
727                 }
728         } else {        /* Wrong hash mode */
729                 ret = -EPERM;
730                 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
731                         __func__);
732         }
733         return ret;
734 }
735
/**
 * hash_begin - This routine resets some globals and initializes the hash
 *              hardware.
 * @device_data:        Structure for the hash device.
 * @ctx:                Hash context (unused here; kept for the call contract).
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
	/* HW and SW initializations */
	/* Note: there is no need to initialize buffer and digest members */

	/* Wait for any in-flight digest calculation to complete first. */
	while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
		cpu_relax();

	/*
	 * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
	 * prepare the initialize the HASH accelerator to compute the message
	 * digest of a new message.
	 */
	HASH_INITIALIZE;

	/*
	 * NBLW bits. Reset the number of bits in last word (NBLW).
	 */
	HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}
762
/**
 * hash_process_data - Feed message data to the hardware in CPU (PIO) mode.
 * @device_data:        Structure for the hash device.
 * @ctx:                The hash transform context.
 * @req_ctx:            The per-request context (buffer, running length, state).
 * @msg_length:         Number of bytes available in @data_buffer.
 * @data_buffer:        Incoming message bytes.
 * @buffer:             Partial-block accumulation buffer (HASH_BLOCK_SIZE).
 * @index:              In/out: number of bytes already held in @buffer.
 *
 * Accumulates data until a full 512-bit block is available, then resumes the
 * saved hardware state (or initializes the hardware on first use), pushes
 * the block, and saves the state again so the device can be shared between
 * requests. Returns 0 on success or a negative error.
 */
static int hash_process_data(struct hash_device_data *device_data,
			     struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
			     int msg_length, u8 *data_buffer, u8 *buffer,
			     u8 *index)
{
	int ret = 0;
	u32 count;

	do {
		/* Not enough for a full block yet: just buffer the bytes. */
		if ((*index + msg_length) < HASH_BLOCK_SIZE) {
			for (count = 0; count < msg_length; count++) {
				buffer[*index + count] =
					*(data_buffer + count);
			}
			*index += msg_length;
			msg_length = 0;
		} else {
			if (req_ctx->updated) {
				/* Restore the hw state saved after the last block. */
				ret = hash_resume_state(device_data,
						&device_data->state);
				memmove(req_ctx->state.buffer,
					device_data->state.buffer,
					HASH_BLOCK_SIZE);
				/* NOTE(review): ret is checked only after the
				 * memmove above — order kept as-is. */
				if (ret) {
					dev_err(device_data->dev,
						"%s: hash_resume_state() failed!\n",
						__func__);
					goto out;
				}
			} else {
				/* First block of this request: set up the hw. */
				ret = init_hash_hw(device_data, ctx);
				if (ret) {
					dev_err(device_data->dev,
						"%s: init_hash_hw() failed!\n",
						__func__);
					goto out;
				}
				req_ctx->updated = 1;
			}
			/*
			 * If 'data_buffer' is four byte aligned and
			 * local buffer does not have any data, we can
			 * write data directly from 'data_buffer' to
			 * HW peripheral, otherwise we first copy data
			 * to a local buffer
			 */
			if (IS_ALIGNED((unsigned long)data_buffer, 4) &&
			    (0 == *index))
				hash_processblock(device_data,
						  (const u32 *)data_buffer,
						  HASH_BLOCK_SIZE);
			else {
				/* Top up the local buffer to a full block. */
				for (count = 0;
				     count < (u32)(HASH_BLOCK_SIZE - *index);
				     count++) {
					buffer[*index + count] =
						*(data_buffer + count);
				}
				hash_processblock(device_data,
						  (const u32 *)buffer,
						  HASH_BLOCK_SIZE);
			}
			hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
			/* Advance past the bytes consumed from data_buffer. */
			data_buffer += (HASH_BLOCK_SIZE - *index);

			msg_length -= (HASH_BLOCK_SIZE - *index);
			*index = 0;

			/* Save hw state so another request can use the device. */
			ret = hash_save_state(device_data,
					&device_data->state);

			memmove(device_data->state.buffer,
				req_ctx->state.buffer,
				HASH_BLOCK_SIZE);
			if (ret) {
				dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
					__func__);
				goto out;
			}
		}
	} while (msg_length != 0);
out:

	return ret;
}
848
849 /**
850  * hash_dma_final - The hash dma final function for SHA1/SHA256.
851  * @req:        The hash request for the job.
852  */
853 static int hash_dma_final(struct ahash_request *req)
854 {
855         int ret = 0;
856         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
857         struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
858         struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
859         struct hash_device_data *device_data;
860         u8 digest[SHA256_DIGEST_SIZE];
861         int bytes_written = 0;
862
863         ret = hash_get_device_data(ctx, &device_data);
864         if (ret)
865                 return ret;
866
867         dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
868                 (unsigned long)ctx);
869
870         if (req_ctx->updated) {
871                 ret = hash_resume_state(device_data, &device_data->state);
872
873                 if (ret) {
874                         dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
875                                 __func__);
876                         goto out;
877                 }
878         }
879
880         if (!req_ctx->updated) {
881                 ret = hash_setconfiguration(device_data, &ctx->config);
882                 if (ret) {
883                         dev_err(device_data->dev,
884                                 "%s: hash_setconfiguration() failed!\n",
885                                 __func__);
886                         goto out;
887                 }
888
889                 /* Enable DMA input */
890                 if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
891                         HASH_CLEAR_BITS(&device_data->base->cr,
892                                         HASH_CR_DMAE_MASK);
893                 } else {
894                         HASH_SET_BITS(&device_data->base->cr,
895                                       HASH_CR_DMAE_MASK);
896                         HASH_SET_BITS(&device_data->base->cr,
897                                       HASH_CR_PRIVN_MASK);
898                 }
899
900                 HASH_INITIALIZE;
901
902                 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
903                         hash_hw_write_key(device_data, ctx->key, ctx->keylen);
904
905                 /* Number of bits in last word = (nbytes * 8) % 32 */
906                 HASH_SET_NBLW((req->nbytes * 8) % 32);
907                 req_ctx->updated = 1;
908         }
909
910         /* Store the nents in the dma struct. */
911         ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
912         if (!ctx->device->dma.nents) {
913                 dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n",
914                         __func__);
915                 ret = ctx->device->dma.nents;
916                 goto out;
917         }
918
919         bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
920         if (bytes_written != req->nbytes) {
921                 dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
922                         __func__);
923                 ret = bytes_written;
924                 goto out;
925         }
926
927         wait_for_completion(&ctx->device->dma.complete);
928         hash_dma_done(ctx);
929
930         while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
931                 cpu_relax();
932
933         if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
934                 unsigned int keylen = ctx->keylen;
935                 u8 *key = ctx->key;
936
937                 dev_dbg(device_data->dev, "%s: keylen: %d\n",
938                         __func__, ctx->keylen);
939                 hash_hw_write_key(device_data, key, keylen);
940         }
941
942         hash_get_digest(device_data, digest, ctx->config.algorithm);
943         memcpy(req->result, digest, ctx->digestsize);
944
945 out:
946         release_hash_device(device_data);
947
948         /**
949          * Allocated in setkey, and only used in HMAC.
950          */
951         kfree(ctx->key);
952
953         return ret;
954 }
955
956 /**
957  * hash_hw_final - The final hash calculation function
958  * @req:        The hash request for the job.
959  */
960 static int hash_hw_final(struct ahash_request *req)
961 {
962         int ret = 0;
963         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
964         struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
965         struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
966         struct hash_device_data *device_data;
967         u8 digest[SHA256_DIGEST_SIZE];
968
969         ret = hash_get_device_data(ctx, &device_data);
970         if (ret)
971                 return ret;
972
973         dev_dbg(device_data->dev, "%s: (ctx=0x%lx)!\n", __func__,
974                 (unsigned long)ctx);
975
976         if (req_ctx->updated) {
977                 ret = hash_resume_state(device_data, &device_data->state);
978
979                 if (ret) {
980                         dev_err(device_data->dev,
981                                 "%s: hash_resume_state() failed!\n", __func__);
982                         goto out;
983                 }
984         } else if (req->nbytes == 0 && ctx->keylen == 0) {
985                 u8 zero_hash[SHA256_DIGEST_SIZE];
986                 u32 zero_hash_size = 0;
987                 bool zero_digest = false;
988                 /**
989                  * Use a pre-calculated empty message digest
990                  * (workaround since hw return zeroes, hw bug!?)
991                  */
992                 ret = get_empty_message_digest(device_data, &zero_hash[0],
993                                 &zero_hash_size, &zero_digest);
994                 if (!ret && likely(zero_hash_size == ctx->digestsize) &&
995                     zero_digest) {
996                         memcpy(req->result, &zero_hash[0], ctx->digestsize);
997                         goto out;
998                 } else if (!ret && !zero_digest) {
999                         dev_dbg(device_data->dev,
1000                                 "%s: HMAC zero msg with key, continue...\n",
1001                                 __func__);
1002                 } else {
1003                         dev_err(device_data->dev,
1004                                 "%s: ret=%d, or wrong digest size? %s\n",
1005                                 __func__, ret,
1006                                 zero_hash_size == ctx->digestsize ?
1007                                 "true" : "false");
1008                         /* Return error */
1009                         goto out;
1010                 }
1011         } else if (req->nbytes == 0 && ctx->keylen > 0) {
1012                 dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
1013                         __func__);
1014                 goto out;
1015         }
1016
1017         if (!req_ctx->updated) {
1018                 ret = init_hash_hw(device_data, ctx);
1019                 if (ret) {
1020                         dev_err(device_data->dev,
1021                                 "%s: init_hash_hw() failed!\n", __func__);
1022                         goto out;
1023                 }
1024         }
1025
1026         if (req_ctx->state.index) {
1027                 hash_messagepad(device_data, req_ctx->state.buffer,
1028                                 req_ctx->state.index);
1029         } else {
1030                 HASH_SET_DCAL;
1031                 while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1032                         cpu_relax();
1033         }
1034
1035         if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
1036                 unsigned int keylen = ctx->keylen;
1037                 u8 *key = ctx->key;
1038
1039                 dev_dbg(device_data->dev, "%s: keylen: %d\n",
1040                         __func__, ctx->keylen);
1041                 hash_hw_write_key(device_data, key, keylen);
1042         }
1043
1044         hash_get_digest(device_data, digest, ctx->config.algorithm);
1045         memcpy(req->result, digest, ctx->digestsize);
1046
1047 out:
1048         release_hash_device(device_data);
1049
1050         /**
1051          * Allocated in setkey, and only used in HMAC.
1052          */
1053         kfree(ctx->key);
1054
1055         return ret;
1056 }
1057
/**
 * hash_hw_update - Updates current HASH computation hashing another part of
 *                  the message.
 * @req:        Byte array containing the message to be hashed (caller
 *              allocated).
 *
 * Walks the request scatterlist and feeds each chunk to the hardware via
 * hash_process_data(); bytes that do not fill a whole block stay buffered
 * in req_ctx->state for the next update/final call.
 *
 * Returns 0 on success or a negative error code.
 */
int hash_hw_update(struct ahash_request *req)
{
	int ret = 0;
	u8 index = 0;
	u8 *buffer;
	struct hash_device_data *device_data;
	u8 *data_buffer;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int msg_length;

	/* Resume the partial-block fill position from the request context. */
	index = req_ctx->state.index;
	buffer = (u8 *)req_ctx->state.buffer;

	ret = hash_get_device_data(ctx, &device_data);
	if (ret)
		return ret;

	msg_length = crypto_hash_walk_first(req, &walk);

	/* Empty message ("") is correct indata */
	if (msg_length == 0) {
		ret = 0;
		goto release_dev;
	}

	/* Check if ctx->state.length + msg_length
	   overflows */
	if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
	    HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
		pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
		ret = crypto_hash_walk_done(&walk, -EPERM);
		goto release_dev;
	}

	/* Main loop */
	while (0 != msg_length) {
		data_buffer = walk.data;
		ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
				data_buffer, buffer, &index);

		if (ret) {
			/*
			 * NOTE(review): the message names the old helper
			 * "hash_internal_hw_update"; the call above is
			 * hash_process_data().
			 */
			dev_err(device_data->dev, "%s: hash_internal_hw_update() failed!\n",
				__func__);
			crypto_hash_walk_done(&walk, ret);
			goto release_dev;
		}

		msg_length = crypto_hash_walk_done(&walk, 0);
	}

	/* Remember the leftover-byte count for the next update/final. */
	req_ctx->state.index = index;
	dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
		__func__, req_ctx->state.index, req_ctx->state.bit_index);

release_dev:
	release_hash_device(device_data);

	return ret;
}
1126
1127 /**
1128  * hash_resume_state - Function that resumes the state of an calculation.
1129  * @device_data:        Pointer to the device structure.
1130  * @device_state:       The state to be restored in the hash hardware
1131  */
1132 int hash_resume_state(struct hash_device_data *device_data,
1133                       const struct hash_state *device_state)
1134 {
1135         u32 temp_cr;
1136         s32 count;
1137         int hash_mode = HASH_OPER_MODE_HASH;
1138
1139         if (NULL == device_state) {
1140                 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1141                         __func__);
1142                 return -EPERM;
1143         }
1144
1145         /* Check correctness of index and length members */
1146         if (device_state->index > HASH_BLOCK_SIZE ||
1147             (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
1148                 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1149                         __func__);
1150                 return -EPERM;
1151         }
1152
1153         /*
1154          * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
1155          * prepare the initialize the HASH accelerator to compute the message
1156          * digest of a new message.
1157          */
1158         HASH_INITIALIZE;
1159
1160         temp_cr = device_state->temp_cr;
1161         writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
1162
1163         if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1164                 hash_mode = HASH_OPER_MODE_HMAC;
1165         else
1166                 hash_mode = HASH_OPER_MODE_HASH;
1167
1168         for (count = 0; count < HASH_CSR_COUNT; count++) {
1169                 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1170                         break;
1171
1172                 writel_relaxed(device_state->csr[count],
1173                                &device_data->base->csrx[count]);
1174         }
1175
1176         writel_relaxed(device_state->csfull, &device_data->base->csfull);
1177         writel_relaxed(device_state->csdatain, &device_data->base->csdatain);
1178
1179         writel_relaxed(device_state->str_reg, &device_data->base->str);
1180         writel_relaxed(temp_cr, &device_data->base->cr);
1181
1182         return 0;
1183 }
1184
1185 /**
1186  * hash_save_state - Function that saves the state of hardware.
1187  * @device_data:        Pointer to the device structure.
1188  * @device_state:       The strucure where the hardware state should be saved.
1189  */
1190 int hash_save_state(struct hash_device_data *device_data,
1191                     struct hash_state *device_state)
1192 {
1193         u32 temp_cr;
1194         u32 count;
1195         int hash_mode = HASH_OPER_MODE_HASH;
1196
1197         if (NULL == device_state) {
1198                 dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
1199                         __func__);
1200                 return -ENOTSUPP;
1201         }
1202
1203         /* Write dummy value to force digest intermediate calculation. This
1204          * actually makes sure that there isn't any ongoing calculation in the
1205          * hardware.
1206          */
1207         while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
1208                 cpu_relax();
1209
1210         temp_cr = readl_relaxed(&device_data->base->cr);
1211
1212         device_state->str_reg = readl_relaxed(&device_data->base->str);
1213
1214         device_state->din_reg = readl_relaxed(&device_data->base->din);
1215
1216         if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
1217                 hash_mode = HASH_OPER_MODE_HMAC;
1218         else
1219                 hash_mode = HASH_OPER_MODE_HASH;
1220
1221         for (count = 0; count < HASH_CSR_COUNT; count++) {
1222                 if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
1223                         break;
1224
1225                 device_state->csr[count] =
1226                         readl_relaxed(&device_data->base->csrx[count]);
1227         }
1228
1229         device_state->csfull = readl_relaxed(&device_data->base->csfull);
1230         device_state->csdatain = readl_relaxed(&device_data->base->csdatain);
1231
1232         device_state->temp_cr = temp_cr;
1233
1234         return 0;
1235 }
1236
1237 /**
1238  * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
1239  * @device_data:
1240  *
1241  */
1242 int hash_check_hw(struct hash_device_data *device_data)
1243 {
1244         /* Checking Peripheral Ids  */
1245         if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
1246             HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
1247             HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
1248             HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
1249             HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
1250             HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
1251             HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
1252             HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
1253                 return 0;
1254         }
1255
1256         dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
1257         return -ENOTSUPP;
1258 }
1259
1260 /**
1261  * hash_get_digest - Gets the digest.
1262  * @device_data:        Pointer to the device structure.
1263  * @digest:             User allocated byte array for the calculated digest.
1264  * @algorithm:          The algorithm in use.
1265  */
1266 void hash_get_digest(struct hash_device_data *device_data,
1267                      u8 *digest, int algorithm)
1268 {
1269         u32 temp_hx_val, count;
1270         int loop_ctr;
1271
1272         if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
1273                 dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
1274                         __func__, algorithm);
1275                 return;
1276         }
1277
1278         if (algorithm == HASH_ALGO_SHA1)
1279                 loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
1280         else
1281                 loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);
1282
1283         dev_dbg(device_data->dev, "%s: digest array:(0x%lx)\n",
1284                 __func__, (unsigned long)digest);
1285
1286         /* Copy result into digest array */
1287         for (count = 0; count < loop_ctr; count++) {
1288                 temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
1289                 digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
1290                 digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
1291                 digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
1292                 digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
1293         }
1294 }
1295
1296 /**
1297  * hash_update - The hash update function for SHA1/SHA2 (SHA256).
1298  * @req: The hash request for the job.
1299  */
1300 static int ahash_update(struct ahash_request *req)
1301 {
1302         int ret = 0;
1303         struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1304
1305         if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
1306                 ret = hash_hw_update(req);
1307         /* Skip update for DMA, all data will be passed to DMA in final */
1308
1309         if (ret) {
1310                 pr_err("%s: hash_hw_update() failed!\n", __func__);
1311         }
1312
1313         return ret;
1314 }
1315
1316 /**
1317  * hash_final - The hash final function for SHA1/SHA2 (SHA256).
1318  * @req:        The hash request for the job.
1319  */
1320 static int ahash_final(struct ahash_request *req)
1321 {
1322         int ret = 0;
1323         struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
1324
1325         pr_debug("%s: data size: %d\n", __func__, req->nbytes);
1326
1327         if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
1328                 ret = hash_dma_final(req);
1329         else
1330                 ret = hash_hw_final(req);
1331
1332         if (ret) {
1333                 pr_err("%s: hash_hw/dma_final() failed\n", __func__);
1334         }
1335
1336         return ret;
1337 }
1338
1339 static int hash_setkey(struct crypto_ahash *tfm,
1340                        const u8 *key, unsigned int keylen, int alg)
1341 {
1342         int ret = 0;
1343         struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1344
1345         /**
1346          * Freed in final.
1347          */
1348         ctx->key = kmemdup(key, keylen, GFP_KERNEL);
1349         if (!ctx->key) {
1350                 pr_err("%s: Failed to allocate ctx->key for %d\n",
1351                        __func__, alg);
1352                 return -ENOMEM;
1353         }
1354         ctx->keylen = keylen;
1355
1356         return ret;
1357 }
1358
1359 static int ahash_sha1_init(struct ahash_request *req)
1360 {
1361         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1362         struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1363
1364         ctx->config.data_format = HASH_DATA_8_BITS;
1365         ctx->config.algorithm = HASH_ALGO_SHA1;
1366         ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1367         ctx->digestsize = SHA1_DIGEST_SIZE;
1368
1369         return ux500_hash_init(req);
1370 }
1371
1372 static int ahash_sha256_init(struct ahash_request *req)
1373 {
1374         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1375         struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1376
1377         ctx->config.data_format = HASH_DATA_8_BITS;
1378         ctx->config.algorithm = HASH_ALGO_SHA256;
1379         ctx->config.oper_mode = HASH_OPER_MODE_HASH;
1380         ctx->digestsize = SHA256_DIGEST_SIZE;
1381
1382         return ux500_hash_init(req);
1383 }
1384
/* One-shot SHA-1 digest: init, then update and final in sequence. */
static int ahash_sha1_digest(struct ahash_request *req)
{
	int ret_update, ret_final;

	ret_update = ahash_sha1_init(req);
	if (ret_update)
		return ret_update;

	/* final always runs after update; update's error takes precedence */
	ret_update = ahash_update(req);
	ret_final = ahash_final(req);

	return ret_update ? ret_update : ret_final;
}
1399
/* One-shot SHA-256 digest: init, then update and final in sequence. */
static int ahash_sha256_digest(struct ahash_request *req)
{
	int ret_update, ret_final;

	ret_update = ahash_sha256_init(req);
	if (ret_update)
		return ret_update;

	/* final always runs after update; update's error takes precedence */
	ret_update = ahash_update(req);
	ret_final = ahash_final(req);

	return ret_update ? ret_update : ret_final;
}
1414
/* Import of partial hash state is not implemented by this driver. */
static int ahash_noimport(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}
1419
/* Export of partial hash state is not implemented by this driver. */
static int ahash_noexport(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}
1424
1425 static int hmac_sha1_init(struct ahash_request *req)
1426 {
1427         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1428         struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1429
1430         ctx->config.data_format = HASH_DATA_8_BITS;
1431         ctx->config.algorithm   = HASH_ALGO_SHA1;
1432         ctx->config.oper_mode   = HASH_OPER_MODE_HMAC;
1433         ctx->digestsize         = SHA1_DIGEST_SIZE;
1434
1435         return ux500_hash_init(req);
1436 }
1437
1438 static int hmac_sha256_init(struct ahash_request *req)
1439 {
1440         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1441         struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
1442
1443         ctx->config.data_format = HASH_DATA_8_BITS;
1444         ctx->config.algorithm   = HASH_ALGO_SHA256;
1445         ctx->config.oper_mode   = HASH_OPER_MODE_HMAC;
1446         ctx->digestsize         = SHA256_DIGEST_SIZE;
1447
1448         return ux500_hash_init(req);
1449 }
1450
/* One-shot HMAC-SHA1 digest: init, then update and final in sequence. */
static int hmac_sha1_digest(struct ahash_request *req)
{
	int ret_update, ret_final;

	ret_update = hmac_sha1_init(req);
	if (ret_update)
		return ret_update;

	/* final always runs after update; update's error takes precedence */
	ret_update = ahash_update(req);
	ret_final = ahash_final(req);

	return ret_update ? ret_update : ret_final;
}
1465
/* One-shot HMAC-SHA256 digest: init, then update and final in sequence. */
static int hmac_sha256_digest(struct ahash_request *req)
{
	int ret_update, ret_final;

	ret_update = hmac_sha256_init(req);
	if (ret_update)
		return ret_update;

	/* final always runs after update; update's error takes precedence */
	ret_update = ahash_update(req);
	ret_final = ahash_final(req);

	return ret_update ? ret_update : ret_final;
}
1480
/* Store the HMAC-SHA1 key; the copy is consumed in the final step. */
static int hmac_sha1_setkey(struct crypto_ahash *tfm,
			    const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}
1486
/* Store the HMAC-SHA256 key; the copy is consumed in the final step. */
static int hmac_sha256_setkey(struct crypto_ahash *tfm,
			      const u8 *key, unsigned int keylen)
{
	return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}
1492
/**
 * struct hash_algo_template - driver algorithm descriptor.
 * @conf: hardware configuration (algorithm and operation mode).
 * @hash: the ahash algorithm registered with the crypto API.
 */
struct hash_algo_template {
	struct hash_config conf;
	struct ahash_alg hash;
};
1497
1498 static int hash_cra_init(struct crypto_tfm *tfm)
1499 {
1500         struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
1501         struct crypto_alg *alg = tfm->__crt_alg;
1502         struct hash_algo_template *hash_alg;
1503
1504         hash_alg = container_of(__crypto_ahash_alg(alg),
1505                         struct hash_algo_template,
1506                         hash);
1507
1508         crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1509                                  sizeof(struct hash_req_ctx));
1510
1511         ctx->config.data_format = HASH_DATA_8_BITS;
1512         ctx->config.algorithm = hash_alg->conf.algorithm;
1513         ctx->config.oper_mode = hash_alg->conf.oper_mode;
1514
1515         ctx->digestsize = hash_alg->hash.halg.digestsize;
1516
1517         return 0;
1518 }
1519
1520 static struct hash_algo_template hash_algs[] = {
1521         {
1522                 .conf.algorithm = HASH_ALGO_SHA1,
1523                 .conf.oper_mode = HASH_OPER_MODE_HASH,
1524                 .hash = {
1525                         .init = ux500_hash_init,
1526                         .update = ahash_update,
1527                         .final = ahash_final,
1528                         .digest = ahash_sha1_digest,
1529                         .export = ahash_noexport,
1530                         .import = ahash_noimport,
1531                         .halg.digestsize = SHA1_DIGEST_SIZE,
1532                         .halg.statesize = sizeof(struct hash_ctx),
1533                         .halg.base = {
1534                                 .cra_name = "sha1",
1535                                 .cra_driver_name = "sha1-ux500",
1536                                 .cra_flags = CRYPTO_ALG_ASYNC,
1537                                 .cra_blocksize = SHA1_BLOCK_SIZE,
1538                                 .cra_ctxsize = sizeof(struct hash_ctx),
1539                                 .cra_init = hash_cra_init,
1540                                 .cra_module = THIS_MODULE,
1541                         }
1542                 }
1543         },
1544         {
1545                 .conf.algorithm = HASH_ALGO_SHA256,
1546                 .conf.oper_mode = HASH_OPER_MODE_HASH,
1547                 .hash = {
1548                         .init = ux500_hash_init,
1549                         .update = ahash_update,
1550                         .final = ahash_final,
1551                         .digest = ahash_sha256_digest,
1552                         .export = ahash_noexport,
1553                         .import = ahash_noimport,
1554                         .halg.digestsize = SHA256_DIGEST_SIZE,
1555                         .halg.statesize = sizeof(struct hash_ctx),
1556                         .halg.base = {
1557                                 .cra_name = "sha256",
1558                                 .cra_driver_name = "sha256-ux500",
1559                                 .cra_flags = CRYPTO_ALG_ASYNC,
1560                                 .cra_blocksize = SHA256_BLOCK_SIZE,
1561                                 .cra_ctxsize = sizeof(struct hash_ctx),
1562                                 .cra_init = hash_cra_init,
1563                                 .cra_module = THIS_MODULE,
1564                         }
1565                 }
1566         },
1567         {
1568                 .conf.algorithm = HASH_ALGO_SHA1,
1569                 .conf.oper_mode = HASH_OPER_MODE_HMAC,
1570                         .hash = {
1571                         .init = ux500_hash_init,
1572                         .update = ahash_update,
1573                         .final = ahash_final,
1574                         .digest = hmac_sha1_digest,
1575                         .setkey = hmac_sha1_setkey,
1576                         .export = ahash_noexport,
1577                         .import = ahash_noimport,
1578                         .halg.digestsize = SHA1_DIGEST_SIZE,
1579                         .halg.statesize = sizeof(struct hash_ctx),
1580                         .halg.base = {
1581                                 .cra_name = "hmac(sha1)",
1582                                 .cra_driver_name = "hmac-sha1-ux500",
1583                                 .cra_flags = CRYPTO_ALG_ASYNC,
1584                                 .cra_blocksize = SHA1_BLOCK_SIZE,
1585                                 .cra_ctxsize = sizeof(struct hash_ctx),
1586                                 .cra_init = hash_cra_init,
1587                                 .cra_module = THIS_MODULE,
1588                         }
1589                 }
1590         },
1591         {
1592                 .conf.algorithm = HASH_ALGO_SHA256,
1593                 .conf.oper_mode = HASH_OPER_MODE_HMAC,
1594                 .hash = {
1595                         .init = ux500_hash_init,
1596                         .update = ahash_update,
1597                         .final = ahash_final,
1598                         .digest = hmac_sha256_digest,
1599                         .setkey = hmac_sha256_setkey,
1600                         .export = ahash_noexport,
1601                         .import = ahash_noimport,
1602                         .halg.digestsize = SHA256_DIGEST_SIZE,
1603                         .halg.statesize = sizeof(struct hash_ctx),
1604                         .halg.base = {
1605                                 .cra_name = "hmac(sha256)",
1606                                 .cra_driver_name = "hmac-sha256-ux500",
1607                                 .cra_flags = CRYPTO_ALG_ASYNC,
1608                                 .cra_blocksize = SHA256_BLOCK_SIZE,
1609                                 .cra_ctxsize = sizeof(struct hash_ctx),
1610                                 .cra_init = hash_cra_init,
1611                                 .cra_module = THIS_MODULE,
1612                         }
1613                 }
1614         }
1615 };
1616
1617 /**
1618  * hash_algs_register_all -
1619  */
1620 static int ahash_algs_register_all(struct hash_device_data *device_data)
1621 {
1622         int ret;
1623         int i;
1624         int count;
1625
1626         for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
1627                 ret = crypto_register_ahash(&hash_algs[i].hash);
1628                 if (ret) {
1629                         count = i;
1630                         dev_err(device_data->dev, "%s: alg registration failed\n",
1631                                 hash_algs[i].hash.halg.base.cra_driver_name);
1632                         goto unreg;
1633                 }
1634         }
1635         return 0;
1636 unreg:
1637         for (i = 0; i < count; i++)
1638                 crypto_unregister_ahash(&hash_algs[i].hash);
1639         return ret;
1640 }
1641
1642 /**
1643  * hash_algs_unregister_all -
1644  */
1645 static void ahash_algs_unregister_all(struct hash_device_data *device_data)
1646 {
1647         int i;
1648
1649         for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
1650                 crypto_unregister_ahash(&hash_algs[i].hash);
1651 }
1652
1653 /**
1654  * ux500_hash_probe - Function that probes the hash hardware.
1655  * @pdev: The platform device.
1656  */
1657 static int ux500_hash_probe(struct platform_device *pdev)
1658 {
1659         int                     ret = 0;
1660         struct resource         *res = NULL;
1661         struct hash_device_data *device_data;
1662         struct device           *dev = &pdev->dev;
1663
1664         device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_ATOMIC);
1665         if (!device_data) {
1666                 ret = -ENOMEM;
1667                 goto out;
1668         }
1669
1670         device_data->dev = dev;
1671         device_data->current_ctx = NULL;
1672
1673         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1674         if (!res) {
1675                 dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
1676                 ret = -ENODEV;
1677                 goto out;
1678         }
1679
1680         device_data->phybase = res->start;
1681         device_data->base = devm_ioremap_resource(dev, res);
1682         if (IS_ERR(device_data->base)) {
1683                 dev_err(dev, "%s: ioremap() failed!\n", __func__);
1684                 ret = PTR_ERR(device_data->base);
1685                 goto out;
1686         }
1687         spin_lock_init(&device_data->ctx_lock);
1688         spin_lock_init(&device_data->power_state_lock);
1689
1690         /* Enable power for HASH1 hardware block */
1691         device_data->regulator = regulator_get(dev, "v-ape");
1692         if (IS_ERR(device_data->regulator)) {
1693                 dev_err(dev, "%s: regulator_get() failed!\n", __func__);
1694                 ret = PTR_ERR(device_data->regulator);
1695                 device_data->regulator = NULL;
1696                 goto out;
1697         }
1698
1699         /* Enable the clock for HASH1 hardware block */
1700         device_data->clk = devm_clk_get(dev, NULL);
1701         if (IS_ERR(device_data->clk)) {
1702                 dev_err(dev, "%s: clk_get() failed!\n", __func__);
1703                 ret = PTR_ERR(device_data->clk);
1704                 goto out_regulator;
1705         }
1706
1707         ret = clk_prepare(device_data->clk);
1708         if (ret) {
1709                 dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
1710                 goto out_regulator;
1711         }
1712
1713         /* Enable device power (and clock) */
1714         ret = hash_enable_power(device_data, false);
1715         if (ret) {
1716                 dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
1717                 goto out_clk_unprepare;
1718         }
1719
1720         ret = hash_check_hw(device_data);
1721         if (ret) {
1722                 dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
1723                 goto out_power;
1724         }
1725
1726         if (hash_mode == HASH_MODE_DMA)
1727                 hash_dma_setup_channel(device_data, dev);
1728
1729         platform_set_drvdata(pdev, device_data);
1730
1731         /* Put the new device into the device list... */
1732         klist_add_tail(&device_data->list_node, &driver_data.device_list);
1733         /* ... and signal that a new device is available. */
1734         up(&driver_data.device_allocation);
1735
1736         ret = ahash_algs_register_all(device_data);
1737         if (ret) {
1738                 dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
1739                         __func__);
1740                 goto out_power;
1741         }
1742
1743         dev_info(dev, "successfully registered\n");
1744         return 0;
1745
1746 out_power:
1747         hash_disable_power(device_data, false);
1748
1749 out_clk_unprepare:
1750         clk_unprepare(device_data->clk);
1751
1752 out_regulator:
1753         regulator_put(device_data->regulator);
1754
1755 out:
1756         return ret;
1757 }
1758
1759 /**
1760  * ux500_hash_remove - Function that removes the hash device from the platform.
1761  * @pdev: The platform device.
1762  */
1763 static int ux500_hash_remove(struct platform_device *pdev)
1764 {
1765         struct hash_device_data *device_data;
1766         struct device           *dev = &pdev->dev;
1767
1768         device_data = platform_get_drvdata(pdev);
1769         if (!device_data) {
1770                 dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
1771                 return -ENOMEM;
1772         }
1773
1774         /* Try to decrease the number of available devices. */
1775         if (down_trylock(&driver_data.device_allocation))
1776                 return -EBUSY;
1777
1778         /* Check that the device is free */
1779         spin_lock(&device_data->ctx_lock);
1780         /* current_ctx allocates a device, NULL = unallocated */
1781         if (device_data->current_ctx) {
1782                 /* The device is busy */
1783                 spin_unlock(&device_data->ctx_lock);
1784                 /* Return the device to the pool. */
1785                 up(&driver_data.device_allocation);
1786                 return -EBUSY;
1787         }
1788
1789         spin_unlock(&device_data->ctx_lock);
1790
1791         /* Remove the device from the list */
1792         if (klist_node_attached(&device_data->list_node))
1793                 klist_remove(&device_data->list_node);
1794
1795         /* If this was the last device, remove the services */
1796         if (list_empty(&driver_data.device_list.k_list))
1797                 ahash_algs_unregister_all(device_data);
1798
1799         if (hash_disable_power(device_data, false))
1800                 dev_err(dev, "%s: hash_disable_power() failed\n",
1801                         __func__);
1802
1803         clk_unprepare(device_data->clk);
1804         regulator_put(device_data->regulator);
1805
1806         return 0;
1807 }
1808
/**
 * ux500_hash_shutdown - Function that shutdown the hash device.
 * @pdev: The platform device
 *
 * Best-effort teardown on system shutdown: claims the device if it is
 * free, removes it from the driver list, unregisters the algorithms if
 * it was the last device, and powers the block down. Unlike remove(),
 * this cannot fail and proceeds even if the device appears busy.
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "%s: Cryp still in use! Shutting down anyway...\n",
				__func__);
		/*
		 * (Allocate the device)
		 * Need to set this to non-null (dummy) value,
		 * to avoid usage if context switching.
		 * NOTE(review): this increments a NULL pointer to produce
		 * the sentinel - technically undefined behavior, but the
		 * suspend/resume paths rely on the same dummy value.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
			__func__);
}
1852
1853 #ifdef CONFIG_PM_SLEEP
/**
 * ux500_hash_suspend - Function that suspends the hash device.
 * @dev:        Device to suspend.
 *
 * If the device is free it is claimed with a dummy sentinel context and
 * powered down without saving state; if a real context owns it, power is
 * removed with the save flag set.
 */
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/*
	 * Claim the device if it is currently free by bumping current_ctx
	 * from NULL to the dummy sentinel, so no new context can allocate
	 * it while we are suspending.
	 */
	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	/*
	 * ++temp_ctx turns NULL into the same dummy sentinel as above, so
	 * this tests "claimed by suspend" vs "owned by a real context".
	 * NOTE(review): incrementing a NULL pointer is technically
	 * undefined behavior; it relies on a flat pointer model.
	 */
	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "%s: down_interruptible() failed\n",
				__func__);
		ret = hash_disable_power(device_data, false);

	} else {
		/*
		 * A real context owns the device; 'true' presumably asks
		 * hash_disable_power() to save its state - confirm there.
		 */
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "%s: hash_disable_power()\n", __func__);

	return ret;
}
1890
/**
 * ux500_hash_resume - Function that resume the hash device.
 * @dev:        Device to resume.
 *
 * Mirrors ux500_hash_suspend(): if suspend claimed the device with the
 * dummy sentinel, release the claim and return it to the pool; otherwise
 * restore power (and presumably saved state) for the owning context.
 */
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENOMEM;
	}

	/*
	 * ++temp_ctx recreates the dummy sentinel; if suspend claimed the
	 * device, clear the claim (NOTE(review): NULL-pointer increment,
	 * same caveat as in suspend).
	 */
	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	/* Free again: return it to the pool. Otherwise re-enable power. */
	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);

	return ret;
}
1922 #endif
1923
/* PM hooks; the callbacks are compiled out unless CONFIG_PM_SLEEP is set. */
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);

/* Device-tree match table for the ux500 hash block. */
static const struct of_device_id ux500_hash_match[] = {
	{ .compatible = "stericsson,ux500-hash" },
	{ },
};
MODULE_DEVICE_TABLE(of, ux500_hash_match);

static struct platform_driver hash_driver = {
	.probe  = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.name  = "hash1",
		.of_match_table = ux500_hash_match,
		.pm    = &ux500_hash_pm,
	}
};
1942
1943 /**
1944  * ux500_hash_mod_init - The kernel module init function.
1945  */
1946 static int __init ux500_hash_mod_init(void)
1947 {
1948         klist_init(&driver_data.device_list, NULL, NULL);
1949         /* Initialize the semaphore to 0 devices (locked state) */
1950         sema_init(&driver_data.device_allocation, 0);
1951
1952         return platform_driver_register(&hash_driver);
1953 }
1954
1955 /**
1956  * ux500_hash_mod_fini - The kernel module exit function.
1957  */
1958 static void __exit ux500_hash_mod_fini(void)
1959 {
1960         platform_driver_unregister(&hash_driver);
1961 }
1962
module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

/* Crypto aliases so algorithm lookups by these names autoload the module. */
MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");
This page took 0.149418 seconds and 4 git commands to generate.