/*
 * Cryptographic API.
 *
 * Support for ATMEL SHA1/SHA256 HW acceleration.
 *
 * Copyright (c) 2012 Eukréa Electromatique - ATMEL
 * Author: Nicolas Royer <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Some ideas are from the omap-sham.c driver.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/hw_random.h>
#include <linux/platform_device.h>

#include <linux/device.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <linux/platform_data/crypto-atmel.h>
#include "atmel-sha-regs.h"
#include "atmel-authenc.h"

/* SHA flags */
#define SHA_FLAGS_BUSY			BIT(0)
#define SHA_FLAGS_FINAL			BIT(1)
#define SHA_FLAGS_DMA_ACTIVE		BIT(2)
#define SHA_FLAGS_OUTPUT_READY		BIT(3)
#define SHA_FLAGS_INIT			BIT(4)
#define SHA_FLAGS_CPU			BIT(5)
#define SHA_FLAGS_DMA_READY		BIT(6)
#define SHA_FLAGS_DUMP_REG		BIT(7)

/* bits[11:8] are reserved. */

#define SHA_FLAGS_FINUP			BIT(16)
#define SHA_FLAGS_SG			BIT(17)
#define SHA_FLAGS_ERROR			BIT(23)
#define SHA_FLAGS_PAD			BIT(24)
#define SHA_FLAGS_RESTORE		BIT(25)
#define SHA_FLAGS_IDATAR0		BIT(26)
#define SHA_FLAGS_WAIT_DATARDY		BIT(27)

#define SHA_OP_INIT			0
#define SHA_OP_UPDATE			1
#define SHA_OP_FINAL			2
#define SHA_OP_DIGEST			3

#define SHA_BUFFER_LEN			(PAGE_SIZE / 16)

#define ATMEL_SHA_DMA_THRESHOLD		56

struct atmel_sha_caps {
	bool	has_dma;
	bool	has_dualbuff;
	bool	has_sha224;
	bool	has_sha_384_512;
	bool	has_uihv;
	bool	has_hmac;
};

struct atmel_sha_dev;

/*
 * .statesize = sizeof(struct atmel_sha_reqctx) must be <= PAGE_SIZE / 8 as
 * tested by the ahash_prepare_alg() function.
 */
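/*
 * Sizing note (assuming 4 KiB pages): SHA_BUFFER_LEN is PAGE_SIZE / 16 =
 * 256 bytes and SHA512_BLOCK_SIZE is 128, so the trailing buffer of
 * struct atmel_sha_reqctx below is 384 bytes, which keeps the whole
 * context under the PAGE_SIZE / 8 = 512 byte limit mentioned above.
 */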
struct atmel_sha_reqctx {
	struct atmel_sha_dev	*dd;
	unsigned long		flags;
	unsigned long		op;

	u8	digest[SHA512_DIGEST_SIZE] __aligned(sizeof(u32));
	u64	digcnt[2];
	size_t	bufcnt;
	size_t	buflen;
	dma_addr_t	dma_addr;

	/* walk state */
	struct scatterlist	*sg;
	unsigned int		offset;	/* offset in current sg */
	unsigned int		total;	/* total request */

	size_t	block_size;
	size_t	hash_size;

	u8 buffer[SHA_BUFFER_LEN + SHA512_BLOCK_SIZE] __aligned(sizeof(u32));
};

typedef int (*atmel_sha_fn_t)(struct atmel_sha_dev *);

struct atmel_sha_ctx {
	struct atmel_sha_dev	*dd;
	atmel_sha_fn_t		start;

	unsigned long		flags;
};

#define ATMEL_SHA_QUEUE_LENGTH	50

struct atmel_sha_dma {
	struct dma_chan			*chan;
	struct dma_slave_config		dma_conf;
	struct scatterlist		*sg;
	int				nents;
	unsigned int			last_sg_length;
};

struct atmel_sha_dev {
	struct list_head	list;
	unsigned long		phys_base;
	struct device		*dev;
	struct clk		*iclk;
	int			irq;
	void __iomem		*io_base;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	queue_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;
	bool			is_async;
	bool			force_complete;
	atmel_sha_fn_t		resume;
	atmel_sha_fn_t		cpu_transfer_complete;

	struct atmel_sha_dma	dma_lch_in;

	struct atmel_sha_caps	caps;

	struct scatterlist	tmp;

	u32			hw_version;
};

struct atmel_sha_drv {
	struct list_head	dev_list;
	spinlock_t		lock;
};

static struct atmel_sha_drv atmel_sha = {
	.dev_list = LIST_HEAD_INIT(atmel_sha.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(atmel_sha.lock),
};

#ifdef VERBOSE_DEBUG
static const char *atmel_sha_reg_name(u32 offset, char *tmp, size_t sz, bool wr)
{
	switch (offset) {
	case SHA_CR:
		return "CR";

	case SHA_MR:
		return "MR";

	case SHA_IER:
		return "IER";

	case SHA_IDR:
		return "IDR";

	case SHA_IMR:
		return "IMR";

	case SHA_ISR:
		return "ISR";

	case SHA_MSR:
		return "MSR";

	case SHA_BCR:
		return "BCR";

	case SHA_REG_DIN(0):
	case SHA_REG_DIN(1):
	case SHA_REG_DIN(2):
	case SHA_REG_DIN(3):
	case SHA_REG_DIN(4):
	case SHA_REG_DIN(5):
	case SHA_REG_DIN(6):
	case SHA_REG_DIN(7):
	case SHA_REG_DIN(8):
	case SHA_REG_DIN(9):
	case SHA_REG_DIN(10):
	case SHA_REG_DIN(11):
	case SHA_REG_DIN(12):
	case SHA_REG_DIN(13):
	case SHA_REG_DIN(14):
	case SHA_REG_DIN(15):
		snprintf(tmp, sz, "IDATAR[%u]", (offset - SHA_REG_DIN(0)) >> 2);
		break;

	case SHA_REG_DIGEST(0):
	case SHA_REG_DIGEST(1):
	case SHA_REG_DIGEST(2):
	case SHA_REG_DIGEST(3):
	case SHA_REG_DIGEST(4):
	case SHA_REG_DIGEST(5):
	case SHA_REG_DIGEST(6):
	case SHA_REG_DIGEST(7):
	case SHA_REG_DIGEST(8):
	case SHA_REG_DIGEST(9):
	case SHA_REG_DIGEST(10):
	case SHA_REG_DIGEST(11):
	case SHA_REG_DIGEST(12):
	case SHA_REG_DIGEST(13):
	case SHA_REG_DIGEST(14):
	case SHA_REG_DIGEST(15):
		if (wr)
			snprintf(tmp, sz, "IDATAR[%u]",
				 16u + ((offset - SHA_REG_DIGEST(0)) >> 2));
		else
			snprintf(tmp, sz, "ODATAR[%u]",
				 (offset - SHA_REG_DIGEST(0)) >> 2);
		break;

	case SHA_HW_VERSION:
		return "HWVER";

	default:
		snprintf(tmp, sz, "0x%02x", offset);
		break;
	}

	return tmp;
}

#endif /* VERBOSE_DEBUG */

static inline u32 atmel_sha_read(struct atmel_sha_dev *dd, u32 offset)
{
	u32 value = readl_relaxed(dd->io_base + offset);

#ifdef VERBOSE_DEBUG
	if (dd->flags & SHA_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "read 0x%08x from %s\n", value,
			 atmel_sha_reg_name(offset, tmp, sizeof(tmp), false));
	}
#endif /* VERBOSE_DEBUG */

	return value;
}

static inline void atmel_sha_write(struct atmel_sha_dev *dd,
					u32 offset, u32 value)
{
#ifdef VERBOSE_DEBUG
	if (dd->flags & SHA_FLAGS_DUMP_REG) {
		char tmp[16];

		dev_vdbg(dd->dev, "write 0x%08x into %s\n", value,
			 atmel_sha_reg_name(offset, tmp, sizeof(tmp), true));
	}
#endif /* VERBOSE_DEBUG */

	writel_relaxed(value, dd->io_base + offset);
}

static inline int atmel_sha_complete(struct atmel_sha_dev *dd, int err)
{
	struct ahash_request *req = dd->req;

	dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
		       SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY |
		       SHA_FLAGS_DUMP_REG);

	clk_disable(dd->iclk);

	if ((dd->is_async || dd->force_complete) && req->base.complete)
		req->base.complete(&req->base, err);

	/* handle new request */
	tasklet_schedule(&dd->queue_task);

	return err;
}
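
/*
 * Note: atmel_sha_complete() passes err back to its caller, so error
 * paths throughout this driver can release the hardware and bail out
 * in a single statement, e.g. "return atmel_sha_complete(dd, -EINVAL);".
 */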

static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx)
{
	size_t count;

	while ((ctx->bufcnt < ctx->buflen) && ctx->total) {
		count = min(ctx->sg->length - ctx->offset, ctx->total);
		count = min(count, ctx->buflen - ctx->bufcnt);

		if (count <= 0) {
			/*
			 * Check if count <= 0 because the buffer is full or
			 * because the sg length is 0. In the latter case,
			 * check if there is another sg in the list: a
			 * zero-length sg doesn't necessarily mean the end
			 * of the sg list.
			 */
			if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) {
				ctx->sg = sg_next(ctx->sg);
				continue;
			} else {
				break;
			}
		}

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg,
			ctx->offset, count, 0);

		ctx->bufcnt += count;
		ctx->offset += count;
		ctx->total -= count;

		if (ctx->offset == ctx->sg->length) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
			else
				ctx->total = 0;
		}
	}

	return 0;
}

/*
 * The purpose of this padding is to ensure that the padded message is a
 * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
 * The bit "1" is appended at the end of the message followed by
 * "padlen-1" zero bits. Then a 64 bit block (SHA1/SHA224/SHA256) or a
 * 128 bit block (SHA384/SHA512) equal to the message length in bits
 * is appended.
 *
 * For SHA1/SHA224/SHA256, padlen is calculated as follows:
 * - if message length < 56 bytes then padlen = 56 - message length
 * - else padlen = 64 + 56 - message length
 *
 * For SHA384/SHA512, padlen is calculated as follows:
 * - if message length < 112 bytes then padlen = 112 - message length
 * - else padlen = 128 + 112 - message length
 */
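/*
 * Worked example (derived from the formulas above): a 3-byte SHA-256
 * message gives padlen = 56 - 3 = 53, so the padded message is
 * 3 + 53 + 8 = 64 bytes, i.e. exactly one 512-bit block. A 60-byte
 * message gives padlen = (64 + 56) - 60 = 60, for a total of
 * 60 + 60 + 8 = 128 bytes, i.e. two blocks.
 */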
static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length)
{
	unsigned int index, padlen;
	u64 bits[2];
	u64 size[2];

	size[0] = ctx->digcnt[0];
	size[1] = ctx->digcnt[1];

	size[0] += ctx->bufcnt;
	if (size[0] < ctx->bufcnt)
		size[1]++;

	size[0] += length;
	if (size[0] < length)
		size[1]++;

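	/*
	 * Convert the 128-bit byte count in size[] to a big-endian bit
	 * count: shifting left by 3 multiplies by 8, and the top three
	 * bits of size[0] are carried into the low bits of the high
	 * word via "size[0] >> 61".
	 */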
	bits[1] = cpu_to_be64(size[0] << 3);
	bits[0] = cpu_to_be64(size[1] << 3 | size[0] >> 61);

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		index = ctx->bufcnt & 0x7f;
		padlen = (index < 112) ? (112 - index) : ((128+112) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16);
		ctx->bufcnt += padlen + 16;
		ctx->flags |= SHA_FLAGS_PAD;
		break;

	default:
		index = ctx->bufcnt & 0x3f;
		padlen = (index < 56) ? (56 - index) : ((64+56) - index);
		*(ctx->buffer + ctx->bufcnt) = 0x80;
		memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1);
		memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8);
		ctx->bufcnt += padlen + 8;
		ctx->flags |= SHA_FLAGS_PAD;
		break;
	}
}

static struct atmel_sha_dev *atmel_sha_find_dev(struct atmel_sha_ctx *tctx)
{
	struct atmel_sha_dev *dd = NULL;
	struct atmel_sha_dev *tmp;

	spin_lock_bh(&atmel_sha.lock);
	if (!tctx->dd) {
		list_for_each_entry(tmp, &atmel_sha.dev_list, list) {
			dd = tmp;
			break;
		}
		tctx->dd = dd;
	} else {
		dd = tctx->dd;
	}

	spin_unlock_bh(&atmel_sha.lock);

	return dd;
}

static int atmel_sha_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm);
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = atmel_sha_find_dev(tctx);

	ctx->dd = dd;

	ctx->flags = 0;

	dev_dbg(dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA1;
		ctx->block_size = SHA1_BLOCK_SIZE;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA224;
		ctx->block_size = SHA224_BLOCK_SIZE;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA256;
		ctx->block_size = SHA256_BLOCK_SIZE;
		break;
	case SHA384_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA384;
		ctx->block_size = SHA384_BLOCK_SIZE;
		break;
	case SHA512_DIGEST_SIZE:
		ctx->flags |= SHA_FLAGS_SHA512;
		ctx->block_size = SHA512_BLOCK_SIZE;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->digcnt[0] = 0;
	ctx->digcnt[1] = 0;
	ctx->buflen = SHA_BUFFER_LEN;

	return 0;
}

static void atmel_sha_write_ctrl(struct atmel_sha_dev *dd, int dma)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	u32 valmr = SHA_MR_MODE_AUTO;
	unsigned int i, hashsize = 0;

	if (likely(dma)) {
		if (!dd->caps.has_dma)
			atmel_sha_write(dd, SHA_IER, SHA_INT_TXBUFE);
		valmr = SHA_MR_MODE_PDC;
		if (dd->caps.has_dualbuff)
			valmr |= SHA_MR_DUALBUFF;
	} else {
		atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	}

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		valmr |= SHA_MR_ALGO_SHA1;
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
		valmr |= SHA_MR_ALGO_SHA224;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA256:
		valmr |= SHA_MR_ALGO_SHA256;
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
		valmr |= SHA_MR_ALGO_SHA384;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA512:
		valmr |= SHA_MR_ALGO_SHA512;
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		break;
	}

	/* Setting CR_FIRST only for the first iteration */
	if (!(ctx->digcnt[0] || ctx->digcnt[1])) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
	} else if (dd->caps.has_uihv && (ctx->flags & SHA_FLAGS_RESTORE)) {
		const u32 *hash = (const u32 *)ctx->digest;

		/*
		 * Restore the hardware context: update the User Initialize
		 * Hash Value (UIHV) with the value saved when the latest
		 * 'update' operation completed on this very same crypto
		 * request.
		 */
		ctx->flags &= ~SHA_FLAGS_RESTORE;
		atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV);
		for (i = 0; i < hashsize / sizeof(u32); ++i)
			atmel_sha_write(dd, SHA_REG_DIN(i), hash[i]);
		atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST);
		valmr |= SHA_MR_UIHV;
	}
	/*
	 * WARNING: If the UIHV feature is not available, the hardware CANNOT
	 * process concurrent requests: the internal registers used to store
	 * the hash/digest are still set to the partial digest output values
	 * computed during the latest round.
	 */

	atmel_sha_write(dd, SHA_MR, valmr);
}

static inline int atmel_sha_wait_for_data_ready(struct atmel_sha_dev *dd,
						atmel_sha_fn_t resume)
{
	u32 isr = atmel_sha_read(dd, SHA_ISR);

	if (unlikely(isr & SHA_INT_DATARDY))
		return resume(dd);

	dd->resume = resume;
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
	return -EINPROGRESS;
}

static int atmel_sha_xmit_cpu(struct atmel_sha_dev *dd, const u8 *buf,
			      size_t length, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(dd->dev, "xmit_cpu: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length, final);

	atmel_sha_write_ctrl(dd, 0);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length;
	if (ctx->digcnt[0] < length)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	dd->flags |= SHA_FLAGS_CPU;

	for (count = 0; count < len32; count++)
		atmel_sha_write(dd, SHA_REG_DIN(count), buffer[count]);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_pdc(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int len32;

	dev_dbg(dd->dev, "xmit_pdc: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	len32 = DIV_ROUND_UP(length1, sizeof(u32));
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTDIS);
	atmel_sha_write(dd, SHA_TPR, dma_addr1);
	atmel_sha_write(dd, SHA_TCR, len32);

	len32 = DIV_ROUND_UP(length2, sizeof(u32));
	atmel_sha_write(dd, SHA_TNPR, dma_addr2);
	atmel_sha_write(dd, SHA_TNCR, len32);

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	atmel_sha_write(dd, SHA_PTCR, SHA_PTCR_TXTEN);

	return -EINPROGRESS;
}

static void atmel_sha_dma_callback(void *data)
{
	struct atmel_sha_dev *dd = data;

	dd->is_async = true;

	/* dma_lch_in - completed - wait DATRDY */
	atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY);
}

static int atmel_sha_xmit_dma(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	struct dma_async_tx_descriptor	*in_desc;
	struct scatterlist sg[2];

	dev_dbg(dd->dev, "xmit_dma: digcnt: 0x%llx 0x%llx, length: %zd, final: %d\n",
		ctx->digcnt[1], ctx->digcnt[0], length1, final);

	dd->dma_lch_in.dma_conf.src_maxburst = 16;
	dd->dma_lch_in.dma_conf.dst_maxburst = 16;

	dmaengine_slave_config(dd->dma_lch_in.chan, &dd->dma_lch_in.dma_conf);

	if (length2) {
		sg_init_table(sg, 2);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		sg_dma_address(&sg[1]) = dma_addr2;
		sg_dma_len(&sg[1]) = length2;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 2,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	} else {
		sg_init_table(sg, 1);
		sg_dma_address(&sg[0]) = dma_addr1;
		sg_dma_len(&sg[0]) = length1;
		in_desc = dmaengine_prep_slave_sg(dd->dma_lch_in.chan, sg, 1,
			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
	if (!in_desc)
		return atmel_sha_complete(dd, -EINVAL);

	in_desc->callback = atmel_sha_dma_callback;
	in_desc->callback_param = dd;

	atmel_sha_write_ctrl(dd, 1);

	/* should be non-zero before next lines to disable clocks later */
	ctx->digcnt[0] += length1;
	if (ctx->digcnt[0] < length1)
		ctx->digcnt[1]++;

	if (final)
		dd->flags |= SHA_FLAGS_FINAL; /* catch last interrupt */

	dd->flags |= SHA_FLAGS_DMA_ACTIVE;

	/* Start DMA transfer */
	dmaengine_submit(in_desc);
	dma_async_issue_pending(dd->dma_lch_in.chan);

	return -EINPROGRESS;
}

static int atmel_sha_xmit_start(struct atmel_sha_dev *dd, dma_addr_t dma_addr1,
		size_t length1, dma_addr_t dma_addr2, size_t length2, int final)
{
	if (dd->caps.has_dma)
		return atmel_sha_xmit_dma(dd, dma_addr1, length1,
				dma_addr2, length2, final);
	else
		return atmel_sha_xmit_pdc(dd, dma_addr1, length1,
				dma_addr2, length2, final);
}

static int atmel_sha_update_cpu(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	int bufcnt;

	atmel_sha_append_sg(ctx);
	atmel_sha_fill_padding(ctx, 0);
	bufcnt = ctx->bufcnt;
	ctx->bufcnt = 0;

	return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1);
}

static int atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd,
					struct atmel_sha_reqctx *ctx,
					size_t length, int final)
{
	ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
		dev_err(dd->dev, "dma %zu bytes error\n", ctx->buflen +
				ctx->block_size);
		return atmel_sha_complete(dd, -EINVAL);
	}

	ctx->flags &= ~SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final);
}

static int atmel_sha_update_dma_slow(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int final;
	size_t count;

	atmel_sha_append_sg(ctx);

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	dev_dbg(dd->dev, "slow: bufcnt: %zu, digcnt: 0x%llx 0x%llx, final: %d\n",
		ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final);

	if (final)
		atmel_sha_fill_padding(ctx, 0);

	if (final || (ctx->bufcnt == ctx->buflen)) {
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		return atmel_sha_xmit_dma_map(dd, ctx, count, final);
	}

	return 0;
}

static int atmel_sha_update_dma_start(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);
	unsigned int length, final, tail;
	struct scatterlist *sg;
	unsigned int count;

	if (!ctx->total)
		return 0;

	if (ctx->bufcnt || ctx->offset)
		return atmel_sha_update_dma_slow(dd);

	dev_dbg(dd->dev, "fast: digcnt: 0x%llx 0x%llx, bufcnt: %zd, total: %u\n",
		ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total);

	sg = ctx->sg;

	if (!IS_ALIGNED(sg->offset, sizeof(u32)))
		return atmel_sha_update_dma_slow(dd);

	if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size))
		/* size is not ctx->block_size aligned */
		return atmel_sha_update_dma_slow(dd);

	length = min(ctx->total, sg->length);

	if (sg_is_last(sg)) {
		if (!(ctx->flags & SHA_FLAGS_FINUP)) {
			/* not last sg must be ctx->block_size aligned */
			tail = length & (ctx->block_size - 1);
			length -= tail;
		}
	}

	ctx->total -= length;
	ctx->offset = length; /* offset where to start slow */

	final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total;

	/* Add padding */
	if (final) {
		tail = length & (ctx->block_size - 1);
		length -= tail;
		ctx->total += tail;
		ctx->offset = length; /* offset where to start slow */

		sg = ctx->sg;
		atmel_sha_append_sg(ctx);

		atmel_sha_fill_padding(ctx, length);

		ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer,
			ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		if (dma_mapping_error(dd->dev, ctx->dma_addr)) {
			dev_err(dd->dev, "dma %zu bytes error\n",
				ctx->buflen + ctx->block_size);
			return atmel_sha_complete(dd, -EINVAL);
		}

		if (length == 0) {
			ctx->flags &= ~SHA_FLAGS_SG;
			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0,
					0, final);
		} else {
			ctx->sg = sg;
			if (!dma_map_sg(dd->dev, ctx->sg, 1,
				DMA_TO_DEVICE)) {
				dev_err(dd->dev, "dma_map_sg error\n");
				return atmel_sha_complete(dd, -EINVAL);
			}

			ctx->flags |= SHA_FLAGS_SG;

			count = ctx->bufcnt;
			ctx->bufcnt = 0;
			return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg),
					length, ctx->dma_addr, count, final);
		}
	}

	if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) {
		dev_err(dd->dev, "dma_map_sg error\n");
		return atmel_sha_complete(dd, -EINVAL);
	}

	ctx->flags |= SHA_FLAGS_SG;

	/* next call does not fail... so no unmap in the case of error */
	return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0,
				0, final);
}

static int atmel_sha_update_dma_stop(struct atmel_sha_dev *dd)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req);

	if (ctx->flags & SHA_FLAGS_SG) {
		dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE);
		if (ctx->sg->length == ctx->offset) {
			ctx->sg = sg_next(ctx->sg);
			if (ctx->sg)
				ctx->offset = 0;
		}
		if (ctx->flags & SHA_FLAGS_PAD) {
			dma_unmap_single(dd->dev, ctx->dma_addr,
				ctx->buflen + ctx->block_size, DMA_TO_DEVICE);
		}
	} else {
		dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen +
						ctx->block_size, DMA_TO_DEVICE);
	}

	return 0;
}

static int atmel_sha_update_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "update_req: total: %u, digcnt: 0x%llx 0x%llx\n",
		ctx->total, ctx->digcnt[1], ctx->digcnt[0]);

	if (ctx->flags & SHA_FLAGS_CPU)
		err = atmel_sha_update_cpu(dd);
	else
		err = atmel_sha_update_dma_start(dd);

	/* wait for dma completion before we can take more data */
	dev_dbg(dd->dev, "update: err: %d, digcnt: 0x%llx 0x%llx\n",
			err, ctx->digcnt[1], ctx->digcnt[0]);

	return err;
}

static int atmel_sha_final_req(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err = 0;
	int count;

	if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) {
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_dma_map(dd, ctx, count, 1);
	} else {
		/* faster to handle last block with cpu */
		atmel_sha_fill_padding(ctx, 0);
		count = ctx->bufcnt;
		ctx->bufcnt = 0;
		err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1);
	}

	dev_dbg(dd->dev, "final_req: err: %d\n", err);

	return err;
}

static void atmel_sha_copy_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i, hashsize;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	case SHA_FLAGS_SHA1:
		hashsize = SHA1_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA224:
	case SHA_FLAGS_SHA256:
		hashsize = SHA256_DIGEST_SIZE;
		break;

	case SHA_FLAGS_SHA384:
	case SHA_FLAGS_SHA512:
		hashsize = SHA512_DIGEST_SIZE;
		break;

	default:
		/* Should not happen... */
		return;
	}

	for (i = 0; i < hashsize / sizeof(u32); ++i)
		hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i));
	ctx->flags |= SHA_FLAGS_RESTORE;
}

static void atmel_sha_copy_ready_hash(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	switch (ctx->flags & SHA_FLAGS_ALGO_MASK) {
	default:
	case SHA_FLAGS_SHA1:
		memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA224:
		memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA256:
		memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA384:
		memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE);
		break;

	case SHA_FLAGS_SHA512:
		memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE);
		break;
	}
}

static int atmel_sha_finish(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (ctx->digcnt[0] || ctx->digcnt[1])
		atmel_sha_copy_ready_hash(req);

	dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %zd\n", ctx->digcnt[1],
		ctx->digcnt[0], ctx->bufcnt);

	return 0;
}

static void atmel_sha_finish_req(struct ahash_request *req, int err)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_dev *dd = ctx->dd;

	if (!err) {
		atmel_sha_copy_hash(req);
		if (SHA_FLAGS_FINAL & dd->flags)
			err = atmel_sha_finish(req);
	} else {
		ctx->flags |= SHA_FLAGS_ERROR;
	}

	/* atomic operation is not needed here */
	(void)atmel_sha_complete(dd, err);
}

static int atmel_sha_hw_init(struct atmel_sha_dev *dd)
{
	int err;

	err = clk_enable(dd->iclk);
	if (err)
		return err;

	if (!(SHA_FLAGS_INIT & dd->flags)) {
		atmel_sha_write(dd, SHA_CR, SHA_CR_SWRST);
		dd->flags |= SHA_FLAGS_INIT;
		dd->err = 0;
	}

	return 0;
}

static inline unsigned int atmel_sha_get_version(struct atmel_sha_dev *dd)
{
	return atmel_sha_read(dd, SHA_HW_VERSION) & 0x00000fff;
}

static void atmel_sha_hw_version_init(struct atmel_sha_dev *dd)
{
	atmel_sha_hw_init(dd);

	dd->hw_version = atmel_sha_get_version(dd);

	dev_info(dd->dev,
			"version: 0x%x\n", dd->hw_version);

	clk_disable(dd->iclk);
}

static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
				  struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct atmel_sha_ctx *ctx;
	unsigned long flags;
	bool start_async;
	int err = 0, ret = 0;

	spin_lock_irqsave(&dd->lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->queue, req);

	if (SHA_FLAGS_BUSY & dd->flags) {
		spin_unlock_irqrestore(&dd->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->queue);
	async_req = crypto_dequeue_request(&dd->queue);
	if (async_req)
		dd->flags |= SHA_FLAGS_BUSY;

	spin_unlock_irqrestore(&dd->lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	ctx = crypto_tfm_ctx(async_req->tfm);

	dd->req = ahash_request_cast(async_req);
	start_async = (dd->req != req);
	dd->is_async = start_async;
	dd->force_complete = false;

	/* WARNING: ctx->start() MAY change dd->is_async. */
	err = ctx->start(dd);
	return (start_async) ? ret : err;
}

static int atmel_sha_done(struct atmel_sha_dev *dd);

static int atmel_sha_start(struct atmel_sha_dev *dd)
{
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err;

	dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n",
						ctx->op, req->nbytes);

	err = atmel_sha_hw_init(dd);
	if (err)
		return atmel_sha_complete(dd, err);

	/*
	 * atmel_sha_update_req() and atmel_sha_final_req() can return either:
	 * -EINPROGRESS: the hardware is busy and the SHA driver will resume
	 *               its job later in the done_task.
	 *               This is the main path.
	 *
	 * 0: the SHA driver can continue its job then release the hardware
	 *    later, if needed, with atmel_sha_finish_req().
	 *    This is the alternate path.
	 *
	 * < 0: an error has occurred so atmel_sha_complete(dd, err) has already
	 *      been called, hence the hardware has been released.
	 *      The SHA driver must stop its job without calling
	 *      atmel_sha_finish_req(), otherwise atmel_sha_complete() would be
	 *      called a second time.
	 *
	 * Please note that currently, atmel_sha_final_req() never returns 0.
	 */

	dd->resume = atmel_sha_done;
	if (ctx->op == SHA_OP_UPDATE) {
		err = atmel_sha_update_req(dd);
		if (!err && (ctx->flags & SHA_FLAGS_FINUP))
			/* no final() after finup() */
			err = atmel_sha_final_req(dd);
	} else if (ctx->op == SHA_OP_FINAL) {
		err = atmel_sha_final_req(dd);
	}

	if (!err)
		/* done_task will not finish it, so do it here */
		atmel_sha_finish_req(req, err);

	dev_dbg(dd->dev, "exit, err: %d\n", err);

	return err;
}

static int atmel_sha_enqueue(struct ahash_request *req, unsigned int op)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	struct atmel_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm);
	struct atmel_sha_dev *dd = tctx->dd;

	ctx->op = op;

	return atmel_sha_handle_queue(dd, req);
}

static int atmel_sha_update(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->offset = 0;

	if (ctx->flags & SHA_FLAGS_FINUP) {
		if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD)
			/* faster to use CPU for short transfers */
			ctx->flags |= SHA_FLAGS_CPU;
	} else if (ctx->bufcnt + ctx->total < ctx->buflen) {
		atmel_sha_append_sg(ctx);
		return 0;
	}
	return atmel_sha_enqueue(req, SHA_OP_UPDATE);
}

static int atmel_sha_final(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	ctx->flags |= SHA_FLAGS_FINUP;

	if (ctx->flags & SHA_FLAGS_ERROR)
		return 0; /* uncompleted hash is not needed */

	if (ctx->flags & SHA_FLAGS_PAD)
		/* copy ready hash (+ finalize hmac) */
		return atmel_sha_finish(req);

	return atmel_sha_enqueue(req, SHA_OP_FINAL);
}

static int atmel_sha_finup(struct ahash_request *req)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->flags |= SHA_FLAGS_FINUP;

	err1 = atmel_sha_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() has to be always called to cleanup resources
	 * even if update() failed, except EINPROGRESS
	 */
	err2 = atmel_sha_final(req);

	return err1 ?: err2;
}

static int atmel_sha_digest(struct ahash_request *req)
{
	return atmel_sha_init(req) ?: atmel_sha_finup(req);
}


static int atmel_sha_export(struct ahash_request *req, void *out)
{
	const struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx));
	return 0;
}

static int atmel_sha_import(struct ahash_request *req, const void *in)
{
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);

	memcpy(ctx, in, sizeof(*ctx));
	return 0;
}

static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
	struct atmel_sha_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct atmel_sha_reqctx));
	ctx->start = atmel_sha_start;

	return 0;
}

static struct ahash_alg sha_1_256_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA1_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha1",
			.cra_driver_name	= "atmel-sha1",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA1_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA256_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha256",
			.cra_driver_name	= "atmel-sha256",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA256_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static struct ahash_alg sha_224_alg = {
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA224_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha224",
			.cra_driver_name	= "atmel-sha224",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA224_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
};

static struct ahash_alg sha_384_512_algs[] = {
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA384_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha384",
			.cra_driver_name	= "atmel-sha384",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA384_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
{
	.init		= atmel_sha_init,
	.update		= atmel_sha_update,
	.final		= atmel_sha_final,
	.finup		= atmel_sha_finup,
	.digest		= atmel_sha_digest,
	.export		= atmel_sha_export,
	.import		= atmel_sha_import,
	.halg = {
		.digestsize	= SHA512_DIGEST_SIZE,
		.statesize	= sizeof(struct atmel_sha_reqctx),
		.base	= {
			.cra_name		= "sha512",
			.cra_driver_name	= "atmel-sha512",
			.cra_priority		= 100,
			.cra_flags		= CRYPTO_ALG_ASYNC,
			.cra_blocksize		= SHA512_BLOCK_SIZE,
			.cra_ctxsize		= sizeof(struct atmel_sha_ctx),
			.cra_alignmask		= 0x3,
			.cra_module		= THIS_MODULE,
			.cra_init		= atmel_sha_cra_init,
		}
	}
},
};

static void atmel_sha_queue_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	atmel_sha_handle_queue(dd, NULL);
}

static int atmel_sha_done(struct atmel_sha_dev *dd)
{
	int err = 0;

	if (SHA_FLAGS_CPU & dd->flags) {
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			dd->flags &= ~SHA_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (SHA_FLAGS_DMA_READY & dd->flags) {
		if (SHA_FLAGS_DMA_ACTIVE & dd->flags) {
			dd->flags &= ~SHA_FLAGS_DMA_ACTIVE;
			atmel_sha_update_dma_stop(dd);
			if (dd->err) {
				err = dd->err;
				goto finish;
			}
		}
		if (SHA_FLAGS_OUTPUT_READY & dd->flags) {
			/* hash or semi-hash ready */
			dd->flags &= ~(SHA_FLAGS_DMA_READY |
						SHA_FLAGS_OUTPUT_READY);
			err = atmel_sha_update_dma_start(dd);
			if (err != -EINPROGRESS)
				goto finish;
		}
	}
	return err;

finish:
	/* finish current request */
	atmel_sha_finish_req(dd->req, err);

	return err;
}

static void atmel_sha_done_task(unsigned long data)
{
	struct atmel_sha_dev *dd = (struct atmel_sha_dev *)data;

	dd->is_async = true;
	(void)dd->resume(dd);
}

static irqreturn_t atmel_sha_irq(int irq, void *dev_id)
{
	struct atmel_sha_dev *sha_dd = dev_id;
	u32 reg;

	reg = atmel_sha_read(sha_dd, SHA_ISR);
	if (reg & atmel_sha_read(sha_dd, SHA_IMR)) {
		atmel_sha_write(sha_dd, SHA_IDR, reg);
		if (SHA_FLAGS_BUSY & sha_dd->flags) {
			sha_dd->flags |= SHA_FLAGS_OUTPUT_READY;
			if (!(SHA_FLAGS_CPU & sha_dd->flags))
				sha_dd->flags |= SHA_FLAGS_DMA_READY;
			tasklet_schedule(&sha_dd->done_task);
		} else {
			dev_warn(sha_dd->dev, "SHA interrupt when no active requests.\n");
		}
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}


/* DMA transfer functions */

static bool atmel_sha_dma_check_aligned(struct atmel_sha_dev *dd,
					struct scatterlist *sg,
					size_t len)
{
	struct atmel_sha_dma *dma = &dd->dma_lch_in;
	struct ahash_request *req = dd->req;
	struct atmel_sha_reqctx *ctx = ahash_request_ctx(req);
	size_t bs = ctx->block_size;
	int nents;

	for (nents = 0; sg; sg = sg_next(sg), ++nents) {
		if (!IS_ALIGNED(sg->offset, sizeof(u32)))
			return false;

		/*
		 * This is the last sg, the only one that is allowed to
		 * have an unaligned length.
		 */
		if (len <= sg->length) {
			dma->nents = nents + 1;
			dma->last_sg_length = sg->length;
			sg->length = ALIGN(len, sizeof(u32));
			return true;
		}

		/* All other sg lengths MUST be aligned to the block size. */
		if (!IS_ALIGNED(sg->length, bs))
			return false;

		len -= sg->length;
	}

	return false;
}
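
/*
 * For example (illustrative values): with SHA-1 (64-byte blocks), an sg
 * list of lengths {64, 64, 10} and len = 138 passes the check above: the
 * first two entries are word-aligned and block-sized, and the trailing
 * 10-byte entry is temporarily rounded up to ALIGN(10, 4) = 12 bytes so
 * the DMA engine only moves full 32-bit words. atmel_sha_dma_callback2()
 * later restores the saved last_sg_length.
 */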

static void atmel_sha_dma_callback2(void *data)
{
	struct atmel_sha_dev *dd = data;
	struct atmel_sha_dma *dma = &dd->dma_lch_in;
	struct scatterlist *sg;
	int nents;

	dmaengine_terminate_all(dma->chan);
	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);

	sg = dma->sg;
	for (nents = 0; nents < dma->nents - 1; ++nents)
		sg = sg_next(sg);
	sg->length = dma->last_sg_length;

	dd->is_async = true;
	(void)atmel_sha_wait_for_data_ready(dd, dd->resume);
}

static int atmel_sha_dma_start(struct atmel_sha_dev *dd,
			       struct scatterlist *src,
			       size_t len,
			       atmel_sha_fn_t resume)
{
	struct atmel_sha_dma *dma = &dd->dma_lch_in;
	struct dma_slave_config *config = &dma->dma_conf;
	struct dma_chan *chan = dma->chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	unsigned int sg_len;
	int err;

	dd->resume = resume;

	/*
	 * dma->nents has already been initialized by
	 * atmel_sha_dma_check_aligned().
	 */
	dma->sg = src;
	sg_len = dma_map_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
	if (!sg_len) {
		err = -ENOMEM;
		goto exit;
	}

	config->src_maxburst = 16;
	config->dst_maxburst = 16;
	err = dmaengine_slave_config(chan, config);
	if (err)
		goto unmap_sg;

	desc = dmaengine_prep_slave_sg(chan, dma->sg, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		err = -ENOMEM;
		goto unmap_sg;
	}

	desc->callback = atmel_sha_dma_callback2;
	desc->callback_param = dd;
	cookie = dmaengine_submit(desc);
	err = dma_submit_error(cookie);
	if (err)
		goto unmap_sg;

	dma_async_issue_pending(chan);

	return -EINPROGRESS;

unmap_sg:
	dma_unmap_sg(dd->dev, dma->sg, dma->nents, DMA_TO_DEVICE);
exit:
	return atmel_sha_complete(dd, err);
}


1567 | ||
1568 | static int atmel_sha_cpu_transfer(struct atmel_sha_dev *dd) | |
1569 | { | |
1570 | struct ahash_request *req = dd->req; | |
1571 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1572 | const u32 *words = (const u32 *)ctx->buffer; | |
1573 | size_t i, num_words; | |
1574 | u32 isr, din, din_inc; | |
1575 | ||
1576 | din_inc = (ctx->flags & SHA_FLAGS_IDATAR0) ? 0 : 1; | |
1577 | for (;;) { | |
1578 | /* Write data into the Input Data Registers. */ | |
1579 | num_words = DIV_ROUND_UP(ctx->bufcnt, sizeof(u32)); | |
1580 | for (i = 0, din = 0; i < num_words; ++i, din += din_inc) | |
1581 | atmel_sha_write(dd, SHA_REG_DIN(din), words[i]); | |
1582 | ||
1583 | ctx->offset += ctx->bufcnt; | |
1584 | ctx->total -= ctx->bufcnt; | |
1585 | ||
1586 | if (!ctx->total) | |
1587 | break; | |
1588 | ||
1589 | /* | |
1590 | * Prepare next block: | |
1591 | * Fill ctx->buffer now with the next data to be written into | |
1592 | * IDATARx: it gives time for the SHA hardware to process | |
1593 | * the current data so the SHA_INT_DATARDY flag might be set | |
1594 | * in SHA_ISR when polling this register at the beginning of | |
1595 | * the next loop. | |
1596 | */ | |
1597 | ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); | |
1598 | scatterwalk_map_and_copy(ctx->buffer, ctx->sg, | |
1599 | ctx->offset, ctx->bufcnt, 0); | |
1600 | ||
1601 | /* Wait for hardware to be ready again. */ | |
1602 | isr = atmel_sha_read(dd, SHA_ISR); | |
1603 | if (!(isr & SHA_INT_DATARDY)) { | |
1604 | /* Not ready yet. */ | |
1605 | dd->resume = atmel_sha_cpu_transfer; | |
1606 | atmel_sha_write(dd, SHA_IER, SHA_INT_DATARDY); | |
1607 | return -EINPROGRESS; | |
1608 | } | |
1609 | } | |
1610 | ||
1611 | if (unlikely(!(ctx->flags & SHA_FLAGS_WAIT_DATARDY))) | |
1612 | return dd->cpu_transfer_complete(dd); | |
1613 | ||
1614 | return atmel_sha_wait_for_data_ready(dd, dd->cpu_transfer_complete); | |
1615 | } | |
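
/*
 * A self-contained host-side sketch (not driver code) of the overlap
 * trick used in atmel_sha_cpu_transfer() above: while the hardware
 * digests block N, the CPU already copies block N + 1 out of the source
 * buffer, so the DATARDY poll usually succeeds at once. feed_block()
 * and hw_ready() are illustrative stand-ins; block_size is assumed to
 * be at most sizeof(buffer).
 */
#include <string.h>

static void feed_block(const void *blk, size_t len) { (void)blk; (void)len; }
static int hw_ready(void) { return 1; }

static void pipelined_transfer(const unsigned char *msg, size_t total,
			       size_t block_size)
{
	unsigned char buffer[128];
	size_t bufcnt = total < block_size ? total : block_size;

	memcpy(buffer, msg, bufcnt);
	for (;;) {
		feed_block(buffer, bufcnt);	/* hardware starts on block N */
		msg += bufcnt;
		total -= bufcnt;
		if (!total)
			break;
		/* Copy block N + 1 while the hardware is busy with N. */
		bufcnt = total < block_size ? total : block_size;
		memcpy(buffer, msg, bufcnt);
		while (!hw_ready())
			;	/* usually already set by now */
	}
}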
1616 | ||
1617 | static int atmel_sha_cpu_start(struct atmel_sha_dev *dd, | |
1618 | struct scatterlist *sg, | |
1619 | unsigned int len, | |
1620 | bool idatar0_only, | |
1621 | bool wait_data_ready, | |
1622 | atmel_sha_fn_t resume) | |
1623 | { | |
1624 | struct ahash_request *req = dd->req; | |
1625 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1626 | ||
1627 | if (!len) | |
1628 | return resume(dd); | |
1629 | ||
1630 | ctx->flags &= ~(SHA_FLAGS_IDATAR0 | SHA_FLAGS_WAIT_DATARDY); | |
1631 | ||
1632 | if (idatar0_only) | |
1633 | ctx->flags |= SHA_FLAGS_IDATAR0; | |
1634 | ||
1635 | if (wait_data_ready) | |
1636 | ctx->flags |= SHA_FLAGS_WAIT_DATARDY; | |
1637 | ||
1638 | ctx->sg = sg; | |
1639 | ctx->total = len; | |
1640 | ctx->offset = 0; | |
1641 | ||
1642 | /* Prepare the first block to be written. */ | |
1643 | ctx->bufcnt = min_t(size_t, ctx->block_size, ctx->total); | |
1644 | scatterwalk_map_and_copy(ctx->buffer, ctx->sg, | |
1645 | ctx->offset, ctx->bufcnt, 0); | |
1646 | ||
1647 | dd->cpu_transfer_complete = resume; | |
1648 | return atmel_sha_cpu_transfer(dd); | |
1649 | } | |
1650 | ||
81d8750b CP |
1651 | static int atmel_sha_cpu_hash(struct atmel_sha_dev *dd, |
1652 | const void *data, unsigned int datalen, | |
1653 | bool auto_padding, | |
1654 | atmel_sha_fn_t resume) | |
1655 | { | |
1656 | struct ahash_request *req = dd->req; | |
1657 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1658 | u32 msglen = (auto_padding) ? datalen : 0; | |
1659 | u32 mr = SHA_MR_MODE_AUTO; | |
1660 | ||
1661 | if (!(IS_ALIGNED(datalen, ctx->block_size) || auto_padding)) | |
1662 | return atmel_sha_complete(dd, -EINVAL); | |
1663 | ||
1664 | mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); | |
1665 | atmel_sha_write(dd, SHA_MR, mr); | |
1666 | atmel_sha_write(dd, SHA_MSR, msglen); | |
1667 | atmel_sha_write(dd, SHA_BCR, msglen); | |
1668 | atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); | |
1669 | ||
1670 | sg_init_one(&dd->tmp, data, datalen); | |
1671 | return atmel_sha_cpu_start(dd, &dd->tmp, datalen, false, true, resume); | |
1672 | } | |
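
/*
 * What "auto padding" buys, as a host-side sketch (not driver code):
 * with SHA_MSR/SHA_BCR programmed to the message length, the hardware
 * appends the 0x80 byte, the zero run and the 64-bit length itself, so
 * the caller need not pad to a block boundary. For the 64-byte-block
 * algorithms (SHA-1/SHA-224/SHA-256):
 */
static inline unsigned long sha_padded_len(unsigned long msglen)
{
	/* message || 0x80 || zeros || 64-bit bit length, 64-byte blocks */
	return ((msglen + 1 + 8 + 63) / 64) * 64;
}
/* e.g. sha_padded_len(55) == 64, sha_padded_len(56) == 128. */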
1673 | ||
1674 | ||
1675 | /* hmac functions */ | |
1676 | ||
1677 | struct atmel_sha_hmac_key { | |
1678 | bool valid; | |
1679 | unsigned int keylen; | |
1680 | u8 buffer[SHA512_BLOCK_SIZE]; | |
1681 | u8 *keydup; | |
1682 | }; | |
1683 | ||
1684 | static inline void atmel_sha_hmac_key_init(struct atmel_sha_hmac_key *hkey) | |
1685 | { | |
1686 | memset(hkey, 0, sizeof(*hkey)); | |
1687 | } | |
1688 | ||
1689 | static inline void atmel_sha_hmac_key_release(struct atmel_sha_hmac_key *hkey) | |
1690 | { | |
1691 | kfree(hkey->keydup); | |
1692 | memset(hkey, 0, sizeof(*hkey)); | |
1693 | } | |
1694 | ||
1695 | static inline int atmel_sha_hmac_key_set(struct atmel_sha_hmac_key *hkey, | |
1696 | const u8 *key, | |
1697 | unsigned int keylen) | |
1698 | { | |
1699 | atmel_sha_hmac_key_release(hkey); | |
1700 | ||
1701 | if (keylen > sizeof(hkey->buffer)) { | |
1702 | hkey->keydup = kmemdup(key, keylen, GFP_KERNEL); | |
1703 | if (!hkey->keydup) | |
1704 | return -ENOMEM; | |
1705 | ||
1706 | } else { | |
1707 | memcpy(hkey->buffer, key, keylen); | |
1708 | } | |
1709 | ||
1710 | hkey->valid = true; | |
1711 | hkey->keylen = keylen; | |
1712 | return 0; | |
1713 | } | |
1714 | ||
1715 | static inline bool atmel_sha_hmac_key_get(const struct atmel_sha_hmac_key *hkey, | |
1716 | const u8 **key, | |
1717 | unsigned int *keylen) | |
1718 | { | |
1719 | if (!hkey->valid) | |
1720 | return false; | |
1721 | ||
1722 | *keylen = hkey->keylen; | |
1723 | *key = (hkey->keydup) ? hkey->keydup : hkey->buffer; | |
1724 | return true; | |
1725 | } | |
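
/*
 * Usage sketch for the three key helpers above (hypothetical caller,
 * kernel context): keys up to SHA512_BLOCK_SIZE live in the inline
 * buffer, longer keys are kmemdup()'ed, and _release() frees the copy
 * and wipes the whole structure.
 */
static int example_key_roundtrip(const u8 *key, unsigned int keylen)
{
	struct atmel_sha_hmac_key hkey;
	const u8 *k;
	unsigned int klen;
	int err;

	atmel_sha_hmac_key_init(&hkey);
	err = atmel_sha_hmac_key_set(&hkey, key, keylen);
	if (err)
		return err;	/* -ENOMEM when kmemdup() fails */

	if (!atmel_sha_hmac_key_get(&hkey, &k, &klen))
		err = -ENOKEY;	/* cannot happen right after _set() */

	atmel_sha_hmac_key_release(&hkey);
	return err;
}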
1726 | ||
1727 | ||
1728 | struct atmel_sha_hmac_ctx { | |
1729 | struct atmel_sha_ctx base; | |
1730 | ||
1731 | struct atmel_sha_hmac_key hkey; | |
1732 | u32 ipad[SHA512_BLOCK_SIZE / sizeof(u32)]; | |
1733 | u32 opad[SHA512_BLOCK_SIZE / sizeof(u32)]; | |
1734 | atmel_sha_fn_t resume; | |
1735 | }; | |
1736 | ||
1737 | static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd, | |
1738 | atmel_sha_fn_t resume); | |
1739 | static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd, | |
1740 | const u8 *key, unsigned int keylen); | |
1741 | static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd); | |
1742 | static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd); | |
1743 | static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd); | |
1744 | static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd); | |
1745 | ||
1746 | static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd); | |
1747 | static int atmel_sha_hmac_final(struct atmel_sha_dev *dd); | |
1748 | static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd); | |
1749 | static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd); | |
1750 | ||
1751 | static int atmel_sha_hmac_setup(struct atmel_sha_dev *dd, | |
1752 | atmel_sha_fn_t resume) | |
1753 | { | |
1754 | struct ahash_request *req = dd->req; | |
1755 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1756 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
1757 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
1758 | unsigned int keylen; | |
1759 | const u8 *key; | |
1760 | size_t bs; | |
1761 | ||
1762 | hmac->resume = resume; | |
1763 | switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { | |
1764 | case SHA_FLAGS_SHA1: | |
1765 | ctx->block_size = SHA1_BLOCK_SIZE; | |
1766 | ctx->hash_size = SHA1_DIGEST_SIZE; | |
1767 | break; | |
1768 | ||
1769 | case SHA_FLAGS_SHA224: | |
1770 | ctx->block_size = SHA224_BLOCK_SIZE; | |
1771 | ctx->hash_size = SHA256_DIGEST_SIZE; | |
1772 | break; | |
1773 | ||
1774 | case SHA_FLAGS_SHA256: | |
1775 | ctx->block_size = SHA256_BLOCK_SIZE; | |
1776 | ctx->hash_size = SHA256_DIGEST_SIZE; | |
1777 | break; | |
1778 | ||
1779 | case SHA_FLAGS_SHA384: | |
1780 | ctx->block_size = SHA384_BLOCK_SIZE; | |
1781 | ctx->hash_size = SHA512_DIGEST_SIZE; | |
1782 | break; | |
1783 | ||
1784 | case SHA_FLAGS_SHA512: | |
1785 | ctx->block_size = SHA512_BLOCK_SIZE; | |
1786 | ctx->hash_size = SHA512_DIGEST_SIZE; | |
1787 | break; | |
1788 | ||
1789 | default: | |
1790 | return atmel_sha_complete(dd, -EINVAL); | |
1791 | } | |
1792 | bs = ctx->block_size; | |
1793 | ||
1794 | if (likely(!atmel_sha_hmac_key_get(&hmac->hkey, &key, &keylen))) | |
1795 | return resume(dd); | |
1796 | ||
1797 | /* Compute K' from K. */ | |
1798 | if (unlikely(keylen > bs)) | |
1799 | return atmel_sha_hmac_prehash_key(dd, key, keylen); | |
1800 | ||
1801 | /* Prepare ipad. */ | |
1802 | memcpy((u8 *)hmac->ipad, key, keylen); | |
1803 | memset((u8 *)hmac->ipad + keylen, 0, bs - keylen); | |
1804 | return atmel_sha_hmac_compute_ipad_hash(dd); | |
1805 | } | |
1806 | ||
1807 | static int atmel_sha_hmac_prehash_key(struct atmel_sha_dev *dd, | |
1808 | const u8 *key, unsigned int keylen) | |
1809 | { | |
1810 | return atmel_sha_cpu_hash(dd, key, keylen, true, | |
1811 | atmel_sha_hmac_prehash_key_done); | |
1812 | } | |
1813 | ||
1814 | static int atmel_sha_hmac_prehash_key_done(struct atmel_sha_dev *dd) | |
1815 | { | |
1816 | struct ahash_request *req = dd->req; | |
1817 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
1818 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
1819 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1820 | size_t ds = crypto_ahash_digestsize(tfm); | |
1821 | size_t bs = ctx->block_size; | |
1822 | size_t i, num_words = ds / sizeof(u32); | |
1823 | ||
1824 | /* Prepare ipad. */ | |
1825 | for (i = 0; i < num_words; ++i) | |
1826 | hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); | |
1827 | memset((u8 *)hmac->ipad + ds, 0, bs - ds); | |
1828 | return atmel_sha_hmac_compute_ipad_hash(dd); | |
1829 | } | |
1830 | ||
1831 | static int atmel_sha_hmac_compute_ipad_hash(struct atmel_sha_dev *dd) | |
1832 | { | |
1833 | struct ahash_request *req = dd->req; | |
1834 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
1835 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
1836 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1837 | size_t bs = ctx->block_size; | |
1838 | size_t i, num_words = bs / sizeof(u32); | |
1839 | ||
1840 | memcpy(hmac->opad, hmac->ipad, bs); | |
1841 | for (i = 0; i < num_words; ++i) { | |
1842 | hmac->ipad[i] ^= 0x36363636; | |
1843 | hmac->opad[i] ^= 0x5c5c5c5c; | |
1844 | } | |
1845 | ||
1846 | return atmel_sha_cpu_hash(dd, hmac->ipad, bs, false, | |
1847 | atmel_sha_hmac_compute_opad_hash); | |
1848 | } | |
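
/*
 * For reference, a minimal host-side sketch (not driver code) of the
 * same RFC 2104 pad construction: the loop above XORs whole u32 words
 * with 0x36363636/0x5c5c5c5c, which is byte-for-byte equivalent to the
 * classic 0x36/0x5c masks below. Keys longer than the block size are
 * assumed to have been reduced already, as atmel_sha_hmac_prehash_key()
 * does.
 */
#include <stdint.h>
#include <string.h>

#define HMAC_BLOCK_SIZE 64	/* SHA-1/SHA-256; SHA-384/512 use 128 */

static void hmac_prepare_pads(const uint8_t *key, size_t keylen,
			      uint8_t ipad[HMAC_BLOCK_SIZE],
			      uint8_t opad[HMAC_BLOCK_SIZE])
{
	size_t i;

	/* keylen must be <= HMAC_BLOCK_SIZE here. */
	memset(ipad, 0, HMAC_BLOCK_SIZE);	/* K' = K padded with zeros */
	memcpy(ipad, key, keylen);
	memcpy(opad, ipad, HMAC_BLOCK_SIZE);

	for (i = 0; i < HMAC_BLOCK_SIZE; i++) {
		ipad[i] ^= 0x36;
		opad[i] ^= 0x5c;
	}
}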
1849 | ||
1850 | static int atmel_sha_hmac_compute_opad_hash(struct atmel_sha_dev *dd) | |
1851 | { | |
1852 | struct ahash_request *req = dd->req; | |
1853 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
1854 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
1855 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1856 | size_t bs = ctx->block_size; | |
1857 | size_t hs = ctx->hash_size; | |
1858 | size_t i, num_words = hs / sizeof(u32); | |
1859 | ||
1860 | for (i = 0; i < num_words; ++i) | |
1861 | hmac->ipad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); | |
1862 | return atmel_sha_cpu_hash(dd, hmac->opad, bs, false, | |
1863 | atmel_sha_hmac_setup_done); | |
1864 | } | |
1865 | ||
1866 | static int atmel_sha_hmac_setup_done(struct atmel_sha_dev *dd) | |
1867 | { | |
1868 | struct ahash_request *req = dd->req; | |
1869 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
1870 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
1871 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1872 | size_t hs = ctx->hash_size; | |
1873 | size_t i, num_words = hs / sizeof(u32); | |
1874 | ||
1875 | for (i = 0; i < num_words; ++i) | |
1876 | hmac->opad[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); | |
1877 | atmel_sha_hmac_key_release(&hmac->hkey); | |
1878 | return hmac->resume(dd); | |
1879 | } | |
1880 | ||
1881 | static int atmel_sha_hmac_start(struct atmel_sha_dev *dd) | |
1882 | { | |
1883 | struct ahash_request *req = dd->req; | |
1884 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1885 | int err; | |
1886 | ||
1887 | err = atmel_sha_hw_init(dd); | |
1888 | if (err) | |
1889 | return atmel_sha_complete(dd, err); | |
1890 | ||
1891 | switch (ctx->op) { | |
1892 | case SHA_OP_INIT: | |
1893 | err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_init_done); | |
1894 | break; | |
1895 | ||
1896 | case SHA_OP_UPDATE: | |
1897 | dd->resume = atmel_sha_done; | |
1898 | err = atmel_sha_update_req(dd); | |
1899 | break; | |
1900 | ||
1901 | case SHA_OP_FINAL: | |
1902 | dd->resume = atmel_sha_hmac_final; | |
1903 | err = atmel_sha_final_req(dd); | |
1904 | break; | |
1905 | ||
1906 | case SHA_OP_DIGEST: | |
1907 | err = atmel_sha_hmac_setup(dd, atmel_sha_hmac_digest2); | |
1908 | break; | |
1909 | ||
1910 | default: | |
1911 | return atmel_sha_complete(dd, -EINVAL); | |
1912 | } | |
1913 | ||
1914 | return err; | |
1915 | } | |
1916 | ||
1917 | static int atmel_sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, | |
1918 | unsigned int keylen) | |
1919 | { | |
1920 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
1921 | ||
1922 | if (atmel_sha_hmac_key_set(&hmac->hkey, key, keylen)) { | |
1923 | crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); | |
1924 | return -EINVAL; | |
1925 | } | |
1926 | ||
1927 | return 0; | |
1928 | } | |
1929 | ||
1930 | static int atmel_sha_hmac_init(struct ahash_request *req) | |
1931 | { | |
1932 | int err; | |
1933 | ||
1934 | err = atmel_sha_init(req); | |
1935 | if (err) | |
1936 | return err; | |
1937 | ||
1938 | return atmel_sha_enqueue(req, SHA_OP_INIT); | |
1939 | } | |
1940 | ||
1941 | static int atmel_sha_hmac_init_done(struct atmel_sha_dev *dd) | |
1942 | { | |
1943 | struct ahash_request *req = dd->req; | |
1944 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1945 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
1946 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
1947 | size_t bs = ctx->block_size; | |
1948 | size_t hs = ctx->hash_size; | |
1949 | ||
1950 | ctx->bufcnt = 0; | |
1951 | ctx->digcnt[0] = bs; | |
1952 | ctx->digcnt[1] = 0; | |
1953 | ctx->flags |= SHA_FLAGS_RESTORE; | |
1954 | memcpy(ctx->digest, hmac->ipad, hs); | |
1955 | return atmel_sha_complete(dd, 0); | |
1956 | } | |
1957 | ||
1958 | static int atmel_sha_hmac_final(struct atmel_sha_dev *dd) | |
1959 | { | |
1960 | struct ahash_request *req = dd->req; | |
1961 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
1962 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
1963 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
1964 | u32 *digest = (u32 *)ctx->digest; | |
1965 | size_t ds = crypto_ahash_digestsize(tfm); | |
1966 | size_t bs = ctx->block_size; | |
1967 | size_t hs = ctx->hash_size; | |
1968 | size_t i, num_words; | |
1969 | u32 mr; | |
1970 | ||
1971 | /* Save d = SHA((K' ^ ipad) || msg). */ | 
1972 | num_words = ds / sizeof(u32); | |
1973 | for (i = 0; i < num_words; ++i) | |
1974 | digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); | |
1975 | ||
1976 | /* Restore context to finish computing SHA((K' ^ opad) || d). */ | 
1977 | atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); | |
1978 | num_words = hs / sizeof(u32); | |
1979 | for (i = 0; i < num_words; ++i) | |
1980 | atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); | |
1981 | ||
1982 | mr = SHA_MR_MODE_AUTO | SHA_MR_UIHV; | |
1983 | mr |= (ctx->flags & SHA_FLAGS_ALGO_MASK); | |
1984 | atmel_sha_write(dd, SHA_MR, mr); | |
1985 | atmel_sha_write(dd, SHA_MSR, bs + ds); | |
1986 | atmel_sha_write(dd, SHA_BCR, ds); | |
1987 | atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); | |
1988 | ||
1989 | sg_init_one(&dd->tmp, digest, ds); | |
1990 | return atmel_sha_cpu_start(dd, &dd->tmp, ds, false, true, | |
1991 | atmel_sha_hmac_final_done); | |
1992 | } | |
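
/*
 * Why SHA_MSR = bs + ds while SHA_BCR = ds above: the outer hash is
 * SHA((K' ^ opad) || d). The (K' ^ opad) block was already absorbed
 * when its intermediate hash state was saved at setup time, and is now
 * restored through UIHV, so only the ds bytes of the inner digest d are
 * streamed through IDATAR - but the automatic padding must still see
 * the full bs + ds message length. Illustrative numbers, not driver
 * code:
 */
static inline void example_outer_hash_sizes(unsigned int *msr,
					    unsigned int *bcr)
{
	unsigned int bs = 64, ds = 32;	/* HMAC-SHA256 block/digest size */

	*msr = bs + ds;	/* 96 bytes: what the padding engine accounts for */
	*bcr = ds;	/* 32 bytes: what the CPU actually writes */
}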
1993 | ||
1994 | static int atmel_sha_hmac_final_done(struct atmel_sha_dev *dd) | |
1995 | { | |
1996 | /* | |
1997 | * req->result might not be sizeof(u32) aligned, so copy the | |
1998 | * digest into ctx->digest[] before memcpy() the data into | |
1999 | * req->result. | |
2000 | */ | |
2001 | atmel_sha_copy_hash(dd->req); | |
2002 | atmel_sha_copy_ready_hash(dd->req); | |
2003 | return atmel_sha_complete(dd, 0); | |
2004 | } | |
2005 | ||
2006 | static int atmel_sha_hmac_digest(struct ahash_request *req) | |
2007 | { | |
2008 | int err; | |
2009 | ||
2010 | err = atmel_sha_init(req); | |
2011 | if (err) | |
2012 | return err; | |
2013 | ||
2014 | return atmel_sha_enqueue(req, SHA_OP_DIGEST); | |
2015 | } | |
2016 | ||
2017 | static int atmel_sha_hmac_digest2(struct atmel_sha_dev *dd) | |
2018 | { | |
2019 | struct ahash_request *req = dd->req; | |
2020 | struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); | |
2021 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
2022 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
2023 | size_t hs = ctx->hash_size; | |
2024 | size_t i, num_words = hs / sizeof(u32); | |
2025 | bool use_dma = false; | |
2026 | u32 mr; | |
2027 | ||
2028 | /* Special case for empty message. */ | |
2029 | if (!req->nbytes) | |
2030 | return atmel_sha_complete(dd, -EINVAL); /* TODO: handle empty messages */ | 
2031 | ||
2032 | /* Check DMA threshold and alignment. */ | |
2033 | if (req->nbytes > ATMEL_SHA_DMA_THRESHOLD && | |
2034 | atmel_sha_dma_check_aligned(dd, req->src, req->nbytes)) | |
2035 | use_dma = true; | |
2036 | ||
2037 | /* Write both initial hash values to compute an HMAC. */ | 
2038 | atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); | |
2039 | for (i = 0; i < num_words; ++i) | |
2040 | atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); | |
2041 | ||
2042 | atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV); | |
2043 | for (i = 0; i < num_words; ++i) | |
2044 | atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); | |
2045 | ||
2046 | /* Write the Mode, Message Size, Bytes Count then Control Registers. */ | |
2047 | mr = (SHA_MR_HMAC | SHA_MR_DUALBUFF); | |
2048 | mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; | |
2049 | if (use_dma) | |
2050 | mr |= SHA_MR_MODE_IDATAR0; | |
2051 | else | |
2052 | mr |= SHA_MR_MODE_AUTO; | |
2053 | atmel_sha_write(dd, SHA_MR, mr); | |
2054 | ||
2055 | atmel_sha_write(dd, SHA_MSR, req->nbytes); | |
2056 | atmel_sha_write(dd, SHA_BCR, req->nbytes); | |
2057 | ||
2058 | atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); | |
2059 | ||
2060 | /* Process data. */ | |
2061 | if (use_dma) | |
2062 | return atmel_sha_dma_start(dd, req->src, req->nbytes, | |
2063 | atmel_sha_hmac_final_done); | |
2064 | ||
2065 | return atmel_sha_cpu_start(dd, req->src, req->nbytes, false, true, | |
2066 | atmel_sha_hmac_final_done); | |
2067 | } | |
2068 | ||
2069 | static int atmel_sha_hmac_cra_init(struct crypto_tfm *tfm) | |
2070 | { | |
2071 | struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm); | |
2072 | ||
2073 | crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), | |
2074 | sizeof(struct atmel_sha_reqctx)); | |
2075 | hmac->base.start = atmel_sha_hmac_start; | |
2076 | atmel_sha_hmac_key_init(&hmac->hkey); | |
2077 | ||
2078 | return 0; | |
2079 | } | |
2080 | ||
2081 | static void atmel_sha_hmac_cra_exit(struct crypto_tfm *tfm) | |
2082 | { | |
2083 | struct atmel_sha_hmac_ctx *hmac = crypto_tfm_ctx(tfm); | |
2084 | ||
2085 | atmel_sha_hmac_key_release(&hmac->hkey); | |
2086 | } | |
2087 | ||
2088 | static struct ahash_alg sha_hmac_algs[] = { | |
2089 | { | |
2090 | .init = atmel_sha_hmac_init, | |
2091 | .update = atmel_sha_update, | |
2092 | .final = atmel_sha_final, | |
2093 | .digest = atmel_sha_hmac_digest, | |
2094 | .setkey = atmel_sha_hmac_setkey, | |
2095 | .export = atmel_sha_export, | |
2096 | .import = atmel_sha_import, | |
2097 | .halg = { | |
2098 | .digestsize = SHA1_DIGEST_SIZE, | |
2099 | .statesize = sizeof(struct atmel_sha_reqctx), | |
2100 | .base = { | |
2101 | .cra_name = "hmac(sha1)", | |
2102 | .cra_driver_name = "atmel-hmac-sha1", | |
2103 | .cra_priority = 100, | |
2104 | .cra_flags = CRYPTO_ALG_ASYNC, | |
2105 | .cra_blocksize = SHA1_BLOCK_SIZE, | |
2106 | .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), | |
2107 | .cra_alignmask = 0, | |
2108 | .cra_module = THIS_MODULE, | |
2109 | .cra_init = atmel_sha_hmac_cra_init, | |
2110 | .cra_exit = atmel_sha_hmac_cra_exit, | |
2111 | } | |
2112 | } | |
2113 | }, | |
2114 | { | |
2115 | .init = atmel_sha_hmac_init, | |
2116 | .update = atmel_sha_update, | |
2117 | .final = atmel_sha_final, | |
2118 | .digest = atmel_sha_hmac_digest, | |
2119 | .setkey = atmel_sha_hmac_setkey, | |
2120 | .export = atmel_sha_export, | |
2121 | .import = atmel_sha_import, | |
2122 | .halg = { | |
2123 | .digestsize = SHA224_DIGEST_SIZE, | |
2124 | .statesize = sizeof(struct atmel_sha_reqctx), | |
2125 | .base = { | |
2126 | .cra_name = "hmac(sha224)", | |
2127 | .cra_driver_name = "atmel-hmac-sha224", | |
2128 | .cra_priority = 100, | |
2129 | .cra_flags = CRYPTO_ALG_ASYNC, | |
2130 | .cra_blocksize = SHA224_BLOCK_SIZE, | |
2131 | .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), | |
2132 | .cra_alignmask = 0, | |
2133 | .cra_module = THIS_MODULE, | |
2134 | .cra_init = atmel_sha_hmac_cra_init, | |
2135 | .cra_exit = atmel_sha_hmac_cra_exit, | |
2136 | } | |
2137 | } | |
2138 | }, | |
2139 | { | |
2140 | .init = atmel_sha_hmac_init, | |
2141 | .update = atmel_sha_update, | |
2142 | .final = atmel_sha_final, | |
2143 | .digest = atmel_sha_hmac_digest, | |
2144 | .setkey = atmel_sha_hmac_setkey, | |
2145 | .export = atmel_sha_export, | |
2146 | .import = atmel_sha_import, | |
2147 | .halg = { | |
2148 | .digestsize = SHA256_DIGEST_SIZE, | |
2149 | .statesize = sizeof(struct atmel_sha_reqctx), | |
2150 | .base = { | |
2151 | .cra_name = "hmac(sha256)", | |
2152 | .cra_driver_name = "atmel-hmac-sha256", | |
2153 | .cra_priority = 100, | |
2154 | .cra_flags = CRYPTO_ALG_ASYNC, | |
2155 | .cra_blocksize = SHA256_BLOCK_SIZE, | |
2156 | .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), | |
2157 | .cra_alignmask = 0, | |
2158 | .cra_module = THIS_MODULE, | |
2159 | .cra_init = atmel_sha_hmac_cra_init, | |
2160 | .cra_exit = atmel_sha_hmac_cra_exit, | |
2161 | } | |
2162 | } | |
2163 | }, | |
2164 | { | |
2165 | .init = atmel_sha_hmac_init, | |
2166 | .update = atmel_sha_update, | |
2167 | .final = atmel_sha_final, | |
2168 | .digest = atmel_sha_hmac_digest, | |
2169 | .setkey = atmel_sha_hmac_setkey, | |
2170 | .export = atmel_sha_export, | |
2171 | .import = atmel_sha_import, | |
2172 | .halg = { | |
2173 | .digestsize = SHA384_DIGEST_SIZE, | |
2174 | .statesize = sizeof(struct atmel_sha_reqctx), | |
2175 | .base = { | |
2176 | .cra_name = "hmac(sha384)", | |
2177 | .cra_driver_name = "atmel-hmac-sha384", | |
2178 | .cra_priority = 100, | |
2179 | .cra_flags = CRYPTO_ALG_ASYNC, | |
2180 | .cra_blocksize = SHA384_BLOCK_SIZE, | |
2181 | .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), | |
2182 | .cra_alignmask = 0, | |
2183 | .cra_module = THIS_MODULE, | |
2184 | .cra_init = atmel_sha_hmac_cra_init, | |
2185 | .cra_exit = atmel_sha_hmac_cra_exit, | |
2186 | } | |
2187 | } | |
2188 | }, | |
2189 | { | |
2190 | .init = atmel_sha_hmac_init, | |
2191 | .update = atmel_sha_update, | |
2192 | .final = atmel_sha_final, | |
2193 | .digest = atmel_sha_hmac_digest, | |
2194 | .setkey = atmel_sha_hmac_setkey, | |
2195 | .export = atmel_sha_export, | |
2196 | .import = atmel_sha_import, | |
2197 | .halg = { | |
2198 | .digestsize = SHA512_DIGEST_SIZE, | |
2199 | .statesize = sizeof(struct atmel_sha_reqctx), | |
2200 | .base = { | |
2201 | .cra_name = "hmac(sha512)", | |
2202 | .cra_driver_name = "atmel-hmac-sha512", | |
2203 | .cra_priority = 100, | |
2204 | .cra_flags = CRYPTO_ALG_ASYNC, | |
2205 | .cra_blocksize = SHA512_BLOCK_SIZE, | |
2206 | .cra_ctxsize = sizeof(struct atmel_sha_hmac_ctx), | |
2207 | .cra_alignmask = 0, | |
2208 | .cra_module = THIS_MODULE, | |
2209 | .cra_init = atmel_sha_hmac_cra_init, | |
2210 | .cra_exit = atmel_sha_hmac_cra_exit, | |
2211 | } | |
2212 | } | |
2213 | }, | |
2214 | }; | |
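
/*
 * A hedged consumer-side sketch: once registered, these transforms are
 * reachable through the generic ahash API by cra_name. The completion
 * plumbing below is the standard asynchronous pattern (it assumes
 * <linux/completion.h>); the example_* names are illustrative, error
 * paths are trimmed, and data/out must be DMA-able (i.e. not on the
 * stack) for a hardware driver like this one.
 */
struct example_result {
	struct completion completion;
	int err;
};

static void example_done(struct crypto_async_request *areq, int err)
{
	struct example_result *res = areq->data;

	if (err == -EINPROGRESS)
		return;		/* backlogged request started; keep waiting */
	res->err = err;
	complete(&res->completion);
}

static int example_hmac_sha256(const u8 *key, unsigned int keylen,
			       const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct example_result res;
	int err;

	tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		goto free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto free_tfm;
	}

	init_completion(&res.completion);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_done, &res);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&res.completion);
		err = res.err;
	}

	ahash_request_free(req);
free_tfm:
	crypto_free_ahash(tfm);
	return err;
}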
eec12f66 | 2215 | |
89a82ef8 CP |
2216 | #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC |
2217 | /* authenc functions */ | |
2218 | ||
2219 | static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd); | |
2220 | static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd); | |
2221 | static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd); | |
2222 | ||
2223 | ||
2224 | struct atmel_sha_authenc_ctx { | |
2225 | struct crypto_ahash *tfm; | |
2226 | }; | |
2227 | ||
2228 | struct atmel_sha_authenc_reqctx { | |
2229 | struct atmel_sha_reqctx base; | |
2230 | ||
2231 | atmel_aes_authenc_fn_t cb; | |
2232 | struct atmel_aes_dev *aes_dev; | |
2233 | ||
2234 | /* _init() parameters. */ | |
2235 | struct scatterlist *assoc; | |
2236 | u32 assoclen; | |
2237 | u32 textlen; | |
2238 | ||
2239 | /* _final() parameters. */ | |
2240 | u32 *digest; | |
2241 | unsigned int digestlen; | |
2242 | }; | |
2243 | ||
2244 | static void atmel_sha_authenc_complete(struct crypto_async_request *areq, | |
2245 | int err) | |
2246 | { | |
2247 | struct ahash_request *req = areq->data; | |
2248 | struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); | |
2249 | ||
2250 | authctx->cb(authctx->aes_dev, err, authctx->base.dd->is_async); | |
2251 | } | |
2252 | ||
2253 | static int atmel_sha_authenc_start(struct atmel_sha_dev *dd) | |
2254 | { | |
2255 | struct ahash_request *req = dd->req; | |
2256 | struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); | |
2257 | int err; | |
2258 | ||
2259 | /* | |
2260 | * Force atmel_sha_complete() to call req->base.complete(), i.e. | 
2261 | * atmel_sha_authenc_complete(), which in turn calls authctx->cb(). | |
2262 | */ | |
2263 | dd->force_complete = true; | |
2264 | ||
2265 | err = atmel_sha_hw_init(dd); | |
2266 | return authctx->cb(authctx->aes_dev, err, dd->is_async); | |
2267 | } | |
2268 | ||
2269 | bool atmel_sha_authenc_is_ready(void) | |
2270 | { | |
2271 | struct atmel_sha_ctx dummy; | |
2272 | ||
2273 | dummy.dd = NULL; | |
2274 | return (atmel_sha_find_dev(&dummy) != NULL); | |
2275 | } | |
2276 | EXPORT_SYMBOL_GPL(atmel_sha_authenc_is_ready); | |
2277 | ||
2278 | unsigned int atmel_sha_authenc_get_reqsize(void) | |
2279 | { | |
2280 | return sizeof(struct atmel_sha_authenc_reqctx); | |
2281 | } | |
2282 | EXPORT_SYMBOL_GPL(atmel_sha_authenc_get_reqsize); | |
2283 | ||
2284 | struct atmel_sha_authenc_ctx *atmel_sha_authenc_spawn(unsigned long mode) | |
2285 | { | |
2286 | struct atmel_sha_authenc_ctx *auth; | |
2287 | struct crypto_ahash *tfm; | |
2288 | struct atmel_sha_ctx *tctx; | |
2289 | const char *name; | |
2290 | int err = -EINVAL; | |
2291 | ||
2292 | switch (mode & SHA_FLAGS_MODE_MASK) { | |
2293 | case SHA_FLAGS_HMAC_SHA1: | |
2294 | name = "atmel-hmac-sha1"; | |
2295 | break; | |
2296 | ||
2297 | case SHA_FLAGS_HMAC_SHA224: | |
2298 | name = "atmel-hmac-sha224"; | |
2299 | break; | |
2300 | ||
2301 | case SHA_FLAGS_HMAC_SHA256: | |
2302 | name = "atmel-hmac-sha256"; | |
2303 | break; | |
2304 | ||
2305 | case SHA_FLAGS_HMAC_SHA384: | |
2306 | name = "atmel-hmac-sha384"; | |
2307 | break; | |
2308 | ||
2309 | case SHA_FLAGS_HMAC_SHA512: | |
2310 | name = "atmel-hmac-sha512"; | |
2311 | break; | |
2312 | ||
2313 | default: | |
2314 | goto error; | |
2315 | } | |
2316 | ||
2317 | tfm = crypto_alloc_ahash(name, | |
2318 | CRYPTO_ALG_TYPE_AHASH, | |
2319 | CRYPTO_ALG_TYPE_AHASH_MASK); | |
2320 | if (IS_ERR(tfm)) { | |
2321 | err = PTR_ERR(tfm); | |
2322 | goto error; | |
2323 | } | |
2324 | tctx = crypto_ahash_ctx(tfm); | |
2325 | tctx->start = atmel_sha_authenc_start; | |
2326 | tctx->flags = mode; | |
2327 | ||
2328 | auth = kzalloc(sizeof(*auth), GFP_KERNEL); | |
2329 | if (!auth) { | |
2330 | err = -ENOMEM; | |
2331 | goto err_free_ahash; | |
2332 | } | |
2333 | auth->tfm = tfm; | |
2334 | ||
2335 | return auth; | |
2336 | ||
2337 | err_free_ahash: | |
2338 | crypto_free_ahash(tfm); | |
2339 | error: | |
2340 | return ERR_PTR(err); | |
2341 | } | |
2342 | EXPORT_SYMBOL_GPL(atmel_sha_authenc_spawn); | |
2343 | ||
2344 | void atmel_sha_authenc_free(struct atmel_sha_authenc_ctx *auth) | |
2345 | { | |
2346 | if (auth) | |
2347 | crypto_free_ahash(auth->tfm); | |
2348 | kfree(auth); | |
2349 | } | |
2350 | EXPORT_SYMBOL_GPL(atmel_sha_authenc_free); | |
2351 | ||
2352 | int atmel_sha_authenc_setkey(struct atmel_sha_authenc_ctx *auth, | |
2353 | const u8 *key, unsigned int keylen, | |
2354 | u32 *flags) | |
2355 | { | |
2356 | struct crypto_ahash *tfm = auth->tfm; | |
2357 | int err; | |
2358 | ||
2359 | crypto_ahash_clear_flags(tfm, CRYPTO_TFM_REQ_MASK); | |
2360 | crypto_ahash_set_flags(tfm, *flags & CRYPTO_TFM_REQ_MASK); | |
2361 | err = crypto_ahash_setkey(tfm, key, keylen); | |
2362 | *flags = crypto_ahash_get_flags(tfm); | |
2363 | ||
2364 | return err; | |
2365 | } | |
2366 | EXPORT_SYMBOL_GPL(atmel_sha_authenc_setkey); | |
2367 | ||
2368 | int atmel_sha_authenc_schedule(struct ahash_request *req, | |
2369 | struct atmel_sha_authenc_ctx *auth, | |
2370 | atmel_aes_authenc_fn_t cb, | |
2371 | struct atmel_aes_dev *aes_dev) | |
2372 | { | |
2373 | struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); | |
2374 | struct atmel_sha_reqctx *ctx = &authctx->base; | |
2375 | struct crypto_ahash *tfm = auth->tfm; | |
2376 | struct atmel_sha_ctx *tctx = crypto_ahash_ctx(tfm); | |
2377 | struct atmel_sha_dev *dd; | |
2378 | ||
2379 | /* Reset request context (MUST be done first). */ | |
2380 | memset(authctx, 0, sizeof(*authctx)); | |
2381 | ||
2382 | /* Get SHA device. */ | |
2383 | dd = atmel_sha_find_dev(tctx); | |
2384 | if (!dd) | |
2385 | return cb(aes_dev, -ENODEV, false); | |
2386 | ||
2387 | /* Init request context. */ | |
2388 | ctx->dd = dd; | |
2389 | ctx->buflen = SHA_BUFFER_LEN; | |
2390 | authctx->cb = cb; | |
2391 | authctx->aes_dev = aes_dev; | |
2392 | ahash_request_set_tfm(req, tfm); | |
2393 | ahash_request_set_callback(req, 0, atmel_sha_authenc_complete, req); | |
2394 | ||
2395 | return atmel_sha_handle_queue(dd, req); | |
2396 | } | |
2397 | EXPORT_SYMBOL_GPL(atmel_sha_authenc_schedule); | |
2398 | ||
2399 | int atmel_sha_authenc_init(struct ahash_request *req, | |
2400 | struct scatterlist *assoc, unsigned int assoclen, | |
2401 | unsigned int textlen, | |
2402 | atmel_aes_authenc_fn_t cb, | |
2403 | struct atmel_aes_dev *aes_dev) | |
2404 | { | |
2405 | struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); | |
2406 | struct atmel_sha_reqctx *ctx = &authctx->base; | |
2407 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
2408 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
2409 | struct atmel_sha_dev *dd = ctx->dd; | |
2410 | ||
2411 | if (unlikely(!IS_ALIGNED(assoclen, sizeof(u32)))) | |
2412 | return atmel_sha_complete(dd, -EINVAL); | |
2413 | ||
2414 | authctx->cb = cb; | |
2415 | authctx->aes_dev = aes_dev; | |
2416 | authctx->assoc = assoc; | |
2417 | authctx->assoclen = assoclen; | |
2418 | authctx->textlen = textlen; | |
2419 | ||
2420 | ctx->flags = hmac->base.flags; | |
2421 | return atmel_sha_hmac_setup(dd, atmel_sha_authenc_init2); | |
2422 | } | |
2423 | EXPORT_SYMBOL_GPL(atmel_sha_authenc_init); | |
2424 | ||
2425 | static int atmel_sha_authenc_init2(struct atmel_sha_dev *dd) | |
2426 | { | |
2427 | struct ahash_request *req = dd->req; | |
2428 | struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); | |
2429 | struct atmel_sha_reqctx *ctx = &authctx->base; | |
2430 | struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); | |
2431 | struct atmel_sha_hmac_ctx *hmac = crypto_ahash_ctx(tfm); | |
2432 | size_t hs = ctx->hash_size; | |
2433 | size_t i, num_words = hs / sizeof(u32); | |
2434 | u32 mr, msg_size; | |
2435 | ||
2436 | atmel_sha_write(dd, SHA_CR, SHA_CR_WUIHV); | |
2437 | for (i = 0; i < num_words; ++i) | |
2438 | atmel_sha_write(dd, SHA_REG_DIN(i), hmac->ipad[i]); | |
2439 | ||
2440 | atmel_sha_write(dd, SHA_CR, SHA_CR_WUIEHV); | |
2441 | for (i = 0; i < num_words; ++i) | |
2442 | atmel_sha_write(dd, SHA_REG_DIN(i), hmac->opad[i]); | |
2443 | ||
2444 | mr = (SHA_MR_MODE_IDATAR0 | | |
2445 | SHA_MR_HMAC | | |
2446 | SHA_MR_DUALBUFF); | |
2447 | mr |= ctx->flags & SHA_FLAGS_ALGO_MASK; | |
2448 | atmel_sha_write(dd, SHA_MR, mr); | |
2449 | ||
2450 | msg_size = authctx->assoclen + authctx->textlen; | |
2451 | atmel_sha_write(dd, SHA_MSR, msg_size); | |
2452 | atmel_sha_write(dd, SHA_BCR, msg_size); | |
2453 | ||
2454 | atmel_sha_write(dd, SHA_CR, SHA_CR_FIRST); | |
2455 | ||
2456 | /* Process assoc data. */ | |
2457 | return atmel_sha_cpu_start(dd, authctx->assoc, authctx->assoclen, | |
2458 | true, false, | |
2459 | atmel_sha_authenc_init_done); | |
2460 | } | |
2461 | ||
2462 | static int atmel_sha_authenc_init_done(struct atmel_sha_dev *dd) | |
2463 | { | |
2464 | struct ahash_request *req = dd->req; | |
2465 | struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); | |
2466 | ||
2467 | return authctx->cb(authctx->aes_dev, 0, dd->is_async); | |
2468 | } | |
2469 | ||
2470 | int atmel_sha_authenc_final(struct ahash_request *req, | |
2471 | u32 *digest, unsigned int digestlen, | |
2472 | atmel_aes_authenc_fn_t cb, | |
2473 | struct atmel_aes_dev *aes_dev) | |
2474 | { | |
2475 | struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); | |
2476 | struct atmel_sha_reqctx *ctx = &authctx->base; | |
2477 | struct atmel_sha_dev *dd = ctx->dd; | |
2478 | ||
2479 | switch (ctx->flags & SHA_FLAGS_ALGO_MASK) { | |
2480 | case SHA_FLAGS_SHA1: | |
2481 | authctx->digestlen = SHA1_DIGEST_SIZE; | |
2482 | break; | |
2483 | ||
2484 | case SHA_FLAGS_SHA224: | |
2485 | authctx->digestlen = SHA224_DIGEST_SIZE; | |
2486 | break; | |
2487 | ||
2488 | case SHA_FLAGS_SHA256: | |
2489 | authctx->digestlen = SHA256_DIGEST_SIZE; | |
2490 | break; | |
2491 | ||
2492 | case SHA_FLAGS_SHA384: | |
2493 | authctx->digestlen = SHA384_DIGEST_SIZE; | |
2494 | break; | |
2495 | ||
2496 | case SHA_FLAGS_SHA512: | |
2497 | authctx->digestlen = SHA512_DIGEST_SIZE; | |
2498 | break; | |
2499 | ||
2500 | default: | |
2501 | return atmel_sha_complete(dd, -EINVAL); | |
2502 | } | |
2503 | if (authctx->digestlen > digestlen) | |
2504 | authctx->digestlen = digestlen; | |
2505 | ||
2506 | authctx->cb = cb; | |
2507 | authctx->aes_dev = aes_dev; | |
2508 | authctx->digest = digest; | |
2509 | return atmel_sha_wait_for_data_ready(dd, | |
2510 | atmel_sha_authenc_final_done); | |
2511 | } | |
2512 | EXPORT_SYMBOL_GPL(atmel_sha_authenc_final); | |
2513 | ||
2514 | static int atmel_sha_authenc_final_done(struct atmel_sha_dev *dd) | |
2515 | { | |
2516 | struct ahash_request *req = dd->req; | |
2517 | struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); | |
2518 | size_t i, num_words = authctx->digestlen / sizeof(u32); | |
2519 | ||
2520 | for (i = 0; i < num_words; ++i) | |
2521 | authctx->digest[i] = atmel_sha_read(dd, SHA_REG_DIGEST(i)); | |
2522 | ||
2523 | return atmel_sha_complete(dd, 0); | |
2524 | } | |
2525 | ||
2526 | void atmel_sha_authenc_abort(struct ahash_request *req) | |
2527 | { | |
2528 | struct atmel_sha_authenc_reqctx *authctx = ahash_request_ctx(req); | |
2529 | struct atmel_sha_reqctx *ctx = &authctx->base; | |
2530 | struct atmel_sha_dev *dd = ctx->dd; | |
2531 | ||
2532 | /* Prevent atmel_sha_complete() from calling req->base.complete(). */ | |
2533 | dd->is_async = false; | |
2534 | dd->force_complete = false; | |
2535 | (void)atmel_sha_complete(dd, 0); | |
2536 | } | |
2537 | EXPORT_SYMBOL_GPL(atmel_sha_authenc_abort); | |
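
/*
 * A hedged sketch of the call order the AES driver side is expected to
 * follow (example_cb is a hypothetical atmel_aes_authenc_fn_t; the real
 * consumer sits behind atmel-authenc.h):
 *
 *   auth = atmel_sha_authenc_spawn(SHA_FLAGS_HMAC_SHA256);
 *   atmel_sha_authenc_setkey(auth, key, keylen, &flags);
 *   atmel_sha_authenc_schedule(req, auth, example_cb, aes_dev);
 *        -> example_cb() fires once the SHA device is ready
 *   atmel_sha_authenc_init(req, assoc, assoclen, textlen,
 *                          example_cb, aes_dev);
 *        -> hashes the associated data, then example_cb() fires again
 *   ... the AES engine streams the text through IDATAR0 ...
 *   atmel_sha_authenc_final(req, digest, digestlen, example_cb, aes_dev);
 *        -> example_cb() fires with the HMAC tag copied into digest[]
 *   atmel_sha_authenc_free(auth);
 *
 * atmel_sha_authenc_abort() replaces the _final() step on error paths.
 */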
2538 | ||
2539 | #endif /* CONFIG_CRYPTO_DEV_ATMEL_AUTHENC */ | |
2540 | ||
2541 | ||
ebc82efa NR |
2542 | static void atmel_sha_unregister_algs(struct atmel_sha_dev *dd) |
2543 | { | |
2544 | int i; | |
2545 | ||
81d8750b CP |
2546 | if (dd->caps.has_hmac) |
2547 | for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) | |
2548 | crypto_unregister_ahash(&sha_hmac_algs[i]); | |
2549 | ||
d4905b38 NR |
2550 | for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) |
2551 | crypto_unregister_ahash(&sha_1_256_algs[i]); | |
2552 | ||
2553 | if (dd->caps.has_sha224) | |
2554 | crypto_unregister_ahash(&sha_224_alg); | |
2555 | ||
2556 | if (dd->caps.has_sha_384_512) { | |
2557 | for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) | |
2558 | crypto_unregister_ahash(&sha_384_512_algs[i]); | |
2559 | } | |
ebc82efa NR |
2560 | } |
2561 | ||
2562 | static int atmel_sha_register_algs(struct atmel_sha_dev *dd) | |
2563 | { | |
2564 | int err, i, j; | |
2565 | ||
d4905b38 NR |
2566 | for (i = 0; i < ARRAY_SIZE(sha_1_256_algs); i++) { |
2567 | err = crypto_register_ahash(&sha_1_256_algs[i]); | |
ebc82efa | 2568 | if (err) |
d4905b38 NR |
2569 | goto err_sha_1_256_algs; |
2570 | } | |
2571 | ||
2572 | if (dd->caps.has_sha224) { | |
2573 | err = crypto_register_ahash(&sha_224_alg); | |
2574 | if (err) | |
2575 | goto err_sha_224_algs; | |
2576 | } | |
2577 | ||
2578 | if (dd->caps.has_sha_384_512) { | |
2579 | for (i = 0; i < ARRAY_SIZE(sha_384_512_algs); i++) { | |
2580 | err = crypto_register_ahash(&sha_384_512_algs[i]); | |
2581 | if (err) | |
2582 | goto err_sha_384_512_algs; | |
2583 | } | |
ebc82efa NR |
2584 | } |
2585 | ||
81d8750b CP |
2586 | if (dd->caps.has_hmac) { |
2587 | for (i = 0; i < ARRAY_SIZE(sha_hmac_algs); i++) { | |
2588 | err = crypto_register_ahash(&sha_hmac_algs[i]); | |
2589 | if (err) | |
2590 | goto err_sha_hmac_algs; | |
2591 | } | |
2592 | } | |
2593 | ||
ebc82efa NR |
2594 | return 0; |
2595 | ||
81d8750b CP |
2596 | /* i already equals ARRAY_SIZE(sha_hmac_algs) at this point. */ | 
2597 | err_sha_hmac_algs: | |
2598 | for (j = 0; j < i; j++) | |
2599 | crypto_unregister_ahash(&sha_hmac_algs[j]); | |
2600 | i = ARRAY_SIZE(sha_384_512_algs); | |
d4905b38 NR |
2601 | err_sha_384_512_algs: |
2602 | for (j = 0; j < i; j++) | |
2603 | crypto_unregister_ahash(&sha_384_512_algs[j]); | |
2604 | crypto_unregister_ahash(&sha_224_alg); | |
2605 | err_sha_224_algs: | |
2606 | i = ARRAY_SIZE(sha_1_256_algs); | |
2607 | err_sha_1_256_algs: | |
ebc82efa | 2608 | for (j = 0; j < i; j++) |
d4905b38 | 2609 | crypto_unregister_ahash(&sha_1_256_algs[j]); |
ebc82efa NR |
2610 | |
2611 | return err; | |
2612 | } | |
2613 | ||
d4905b38 NR |
2614 | static bool atmel_sha_filter(struct dma_chan *chan, void *slave) |
2615 | { | |
2616 | struct at_dma_slave *sl = slave; | |
2617 | ||
2618 | if (sl && sl->dma_dev == chan->device->dev) { | |
2619 | chan->private = sl; | |
2620 | return true; | |
2621 | } else { | |
2622 | return false; | |
2623 | } | |
2624 | } | |
2625 | ||
2626 | static int atmel_sha_dma_init(struct atmel_sha_dev *dd, | |
2627 | struct crypto_platform_data *pdata) | |
2628 | { | |
2629 | int err = -ENOMEM; | |
2630 | dma_cap_mask_t mask_in; | |
2631 | ||
abfe7ae4 NF |
2632 | /* Try to grab DMA channel */ |
2633 | dma_cap_zero(mask_in); | |
2634 | dma_cap_set(DMA_SLAVE, mask_in); | |
d4905b38 | 2635 | |
abfe7ae4 NF |
2636 | dd->dma_lch_in.chan = dma_request_slave_channel_compat(mask_in, |
2637 | atmel_sha_filter, &pdata->dma_slave->rxdata, dd->dev, "tx"); | |
2638 | if (!dd->dma_lch_in.chan) { | |
2639 | dev_warn(dd->dev, "no DMA channel available\n"); | |
2640 | return err; | |
d4905b38 NR |
2641 | } |
2642 | ||
abfe7ae4 NF |
2643 | dd->dma_lch_in.dma_conf.direction = DMA_MEM_TO_DEV; |
2644 | dd->dma_lch_in.dma_conf.dst_addr = dd->phys_base + | |
2645 | SHA_REG_DIN(0); | |
2646 | dd->dma_lch_in.dma_conf.src_maxburst = 1; | |
2647 | dd->dma_lch_in.dma_conf.src_addr_width = | |
2648 | DMA_SLAVE_BUSWIDTH_4_BYTES; | |
2649 | dd->dma_lch_in.dma_conf.dst_maxburst = 1; | |
2650 | dd->dma_lch_in.dma_conf.dst_addr_width = | |
2651 | DMA_SLAVE_BUSWIDTH_4_BYTES; | |
2652 | dd->dma_lch_in.dma_conf.device_fc = false; | |
2653 | ||
2654 | return 0; | |
d4905b38 NR |
2655 | } |
2656 | ||
2657 | static void atmel_sha_dma_cleanup(struct atmel_sha_dev *dd) | |
2658 | { | |
2659 | dma_release_channel(dd->dma_lch_in.chan); | |
2660 | } | |
2661 | ||
2662 | static void atmel_sha_get_cap(struct atmel_sha_dev *dd) | |
2663 | { | |
2664 | ||
2665 | dd->caps.has_dma = 0; | |
2666 | dd->caps.has_dualbuff = 0; | |
2667 | dd->caps.has_sha224 = 0; | |
2668 | dd->caps.has_sha_384_512 = 0; | |
7cee3508 | 2669 | dd->caps.has_uihv = 0; |
81d8750b | 2670 | dd->caps.has_hmac = 0; |
d4905b38 NR |
2671 | |
2672 | /* keep only major version number */ | |
2673 | switch (dd->hw_version & 0xff0) { | |
507c5cc2 CP |
2674 | case 0x510: |
2675 | dd->caps.has_dma = 1; | |
2676 | dd->caps.has_dualbuff = 1; | |
2677 | dd->caps.has_sha224 = 1; | |
2678 | dd->caps.has_sha_384_512 = 1; | |
7cee3508 | 2679 | dd->caps.has_uihv = 1; |
81d8750b | 2680 | dd->caps.has_hmac = 1; |
507c5cc2 | 2681 | break; |
141824d0 LZ |
2682 | case 0x420: |
2683 | dd->caps.has_dma = 1; | |
2684 | dd->caps.has_dualbuff = 1; | |
2685 | dd->caps.has_sha224 = 1; | |
2686 | dd->caps.has_sha_384_512 = 1; | |
7cee3508 | 2687 | dd->caps.has_uihv = 1; |
141824d0 | 2688 | break; |
d4905b38 NR |
2689 | case 0x410: |
2690 | dd->caps.has_dma = 1; | |
2691 | dd->caps.has_dualbuff = 1; | |
2692 | dd->caps.has_sha224 = 1; | |
2693 | dd->caps.has_sha_384_512 = 1; | |
2694 | break; | |
2695 | case 0x400: | |
2696 | dd->caps.has_dma = 1; | |
2697 | dd->caps.has_dualbuff = 1; | |
2698 | dd->caps.has_sha224 = 1; | |
2699 | break; | |
2700 | case 0x320: | |
2701 | break; | |
2702 | default: | |
2703 | dev_warn(dd->dev, | |
2704 | "Unmanaged sha version, set minimum capabilities\n"); | |
2705 | break; | |
2706 | } | |
2707 | } | |
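
/*
 * Illustrative only: the switch above keys on (hw_version & 0xff0), so
 * a hypothetical raw SHA_HW_VERSION of 0x513 selects the 0x510
 * capability set (DMA, dual buffer, SHA-224/384/512, UIHV, HMAC), while
 * 0x42a would select 0x420 (the same minus HMAC).
 */
static inline unsigned int example_caps_key(unsigned int hw_version)
{
	return hw_version & 0xff0;	/* e.g. 0x513 -> 0x510 */
}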
2708 | ||
abfe7ae4 NF |
2709 | #if defined(CONFIG_OF) |
2710 | static const struct of_device_id atmel_sha_dt_ids[] = { | |
2711 | { .compatible = "atmel,at91sam9g46-sha" }, | |
2712 | { /* sentinel */ } | |
2713 | }; | |
2714 | ||
2715 | MODULE_DEVICE_TABLE(of, atmel_sha_dt_ids); | |
2716 | ||
2717 | static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pdev) | |
2718 | { | |
2719 | struct device_node *np = pdev->dev.of_node; | |
2720 | struct crypto_platform_data *pdata; | |
2721 | ||
2722 | if (!np) { | |
2723 | dev_err(&pdev->dev, "device node not found\n"); | |
2724 | return ERR_PTR(-EINVAL); | |
2725 | } | |
2726 | ||
2727 | pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); | |
2728 | if (!pdata) { | |
2729 | dev_err(&pdev->dev, "could not allocate memory for pdata\n"); | |
2730 | return ERR_PTR(-ENOMEM); | |
2731 | } | |
2732 | ||
2733 | pdata->dma_slave = devm_kzalloc(&pdev->dev, | |
2734 | sizeof(*(pdata->dma_slave)), | |
2735 | GFP_KERNEL); | |
2736 | if (!pdata->dma_slave) { | |
2737 | dev_err(&pdev->dev, "could not allocate memory for dma_slave\n"); | |
abfe7ae4 NF |
2738 | return ERR_PTR(-ENOMEM); |
2739 | } | |
2740 | ||
2741 | return pdata; | |
2742 | } | |
2743 | #else /* CONFIG_OF */ | |
2744 | static inline struct crypto_platform_data *atmel_sha_of_init(struct platform_device *dev) | |
2745 | { | |
2746 | return ERR_PTR(-EINVAL); | |
2747 | } | |
2748 | #endif | |
2749 | ||
49cfe4db | 2750 | static int atmel_sha_probe(struct platform_device *pdev) |
ebc82efa NR |
2751 | { |
2752 | struct atmel_sha_dev *sha_dd; | |
d4905b38 | 2753 | struct crypto_platform_data *pdata; |
ebc82efa NR |
2754 | struct device *dev = &pdev->dev; |
2755 | struct resource *sha_res; | |
ebc82efa NR |
2756 | int err; |
2757 | ||
b0e8b341 | 2758 | sha_dd = devm_kzalloc(&pdev->dev, sizeof(*sha_dd), GFP_KERNEL); |
ebc82efa NR |
2759 | if (sha_dd == NULL) { |
2760 | dev_err(dev, "unable to alloc data struct.\n"); | |
2761 | err = -ENOMEM; | |
2762 | goto sha_dd_err; | |
2763 | } | |
2764 | ||
2765 | sha_dd->dev = dev; | |
2766 | ||
2767 | platform_set_drvdata(pdev, sha_dd); | |
2768 | ||
2769 | INIT_LIST_HEAD(&sha_dd->list); | |
62728e82 | 2770 | spin_lock_init(&sha_dd->lock); |
ebc82efa NR |
2771 | |
2772 | tasklet_init(&sha_dd->done_task, atmel_sha_done_task, | |
2773 | (unsigned long)sha_dd); | |
f56809c3 CP |
2774 | tasklet_init(&sha_dd->queue_task, atmel_sha_queue_task, |
2775 | (unsigned long)sha_dd); | |
ebc82efa NR |
2776 | |
2777 | crypto_init_queue(&sha_dd->queue, ATMEL_SHA_QUEUE_LENGTH); | |
2778 | ||
2779 | sha_dd->irq = -1; | |
2780 | ||
2781 | /* Get the base address */ | |
2782 | sha_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
2783 | if (!sha_res) { | |
2784 | dev_err(dev, "no MEM resource info\n"); | |
2785 | err = -ENODEV; | |
2786 | goto res_err; | |
2787 | } | |
2788 | sha_dd->phys_base = sha_res->start; | |
ebc82efa NR |
2789 | |
2790 | /* Get the IRQ */ | |
2791 | sha_dd->irq = platform_get_irq(pdev, 0); | |
2792 | if (sha_dd->irq < 0) { | |
2793 | dev_err(dev, "no IRQ resource info\n"); | |
2794 | err = sha_dd->irq; | |
2795 | goto res_err; | |
2796 | } | |
2797 | ||
b0e8b341 LC |
2798 | err = devm_request_irq(&pdev->dev, sha_dd->irq, atmel_sha_irq, |
2799 | IRQF_SHARED, "atmel-sha", sha_dd); | |
ebc82efa NR |
2800 | if (err) { |
2801 | dev_err(dev, "unable to request sha irq.\n"); | |
2802 | goto res_err; | |
2803 | } | |
2804 | ||
2805 | /* Initializing the clock */ | |
b0e8b341 | 2806 | sha_dd->iclk = devm_clk_get(&pdev->dev, "sha_clk"); |
ebc82efa | 2807 | if (IS_ERR(sha_dd->iclk)) { |
be208356 | 2808 | dev_err(dev, "clock initialization failed.\n"); |
ebc82efa | 2809 | err = PTR_ERR(sha_dd->iclk); |
b0e8b341 | 2810 | goto res_err; |
ebc82efa NR |
2811 | } |
2812 | ||
b0e8b341 | 2813 | sha_dd->io_base = devm_ioremap_resource(&pdev->dev, sha_res); |
9b52d55f | 2814 | if (IS_ERR(sha_dd->io_base)) { |
ebc82efa | 2815 | dev_err(dev, "can't ioremap\n"); |
9b52d55f | 2816 | err = PTR_ERR(sha_dd->io_base); |
b0e8b341 | 2817 | goto res_err; |
ebc82efa NR |
2818 | } |
2819 | ||
c033042a CP |
2820 | err = clk_prepare(sha_dd->iclk); |
2821 | if (err) | |
2822 | goto res_err; | |
2823 | ||
d4905b38 NR |
2824 | atmel_sha_hw_version_init(sha_dd); |
2825 | ||
2826 | atmel_sha_get_cap(sha_dd); | |
2827 | ||
2828 | if (sha_dd->caps.has_dma) { | |
2829 | pdata = pdev->dev.platform_data; | |
2830 | if (!pdata) { | |
abfe7ae4 NF |
2831 | pdata = atmel_sha_of_init(pdev); |
2832 | if (IS_ERR(pdata)) { | |
2833 | dev_err(&pdev->dev, "platform data not available\n"); | |
2834 | err = PTR_ERR(pdata); | |
c033042a | 2835 | goto iclk_unprepare; |
abfe7ae4 NF |
2836 | } |
2837 | } | |
2838 | if (!pdata->dma_slave) { | |
d4905b38 | 2839 | err = -ENXIO; |
c033042a | 2840 | goto iclk_unprepare; |
d4905b38 NR |
2841 | } |
2842 | err = atmel_sha_dma_init(sha_dd, pdata); | |
2843 | if (err) | |
2844 | goto err_sha_dma; | |
abfe7ae4 NF |
2845 | |
2846 | dev_info(dev, "using %s for DMA transfers\n", | |
2847 | dma_chan_name(sha_dd->dma_lch_in.chan)); | |
d4905b38 NR |
2848 | } |
2849 | ||
ebc82efa NR |
2850 | spin_lock(&atmel_sha.lock); |
2851 | list_add_tail(&sha_dd->list, &atmel_sha.dev_list); | |
2852 | spin_unlock(&atmel_sha.lock); | |
2853 | ||
2854 | err = atmel_sha_register_algs(sha_dd); | |
2855 | if (err) | |
2856 | goto err_algs; | |
2857 | ||
1ca5b7d9 NF |
2858 | dev_info(dev, "Atmel SHA1/SHA256%s%s\n", |
2859 | sha_dd->caps.has_sha224 ? "/SHA224" : "", | |
2860 | sha_dd->caps.has_sha_384_512 ? "/SHA384/SHA512" : ""); | |
ebc82efa NR |
2861 | |
2862 | return 0; | |
2863 | ||
2864 | err_algs: | |
2865 | spin_lock(&atmel_sha.lock); | |
2866 | list_del(&sha_dd->list); | |
2867 | spin_unlock(&atmel_sha.lock); | |
d4905b38 NR |
2868 | if (sha_dd->caps.has_dma) |
2869 | atmel_sha_dma_cleanup(sha_dd); | |
2870 | err_sha_dma: | |
c033042a CP |
2871 | iclk_unprepare: |
2872 | clk_unprepare(sha_dd->iclk); | |
ebc82efa | 2873 | res_err: |
f56809c3 | 2874 | tasklet_kill(&sha_dd->queue_task); |
ebc82efa | 2875 | tasklet_kill(&sha_dd->done_task); |
ebc82efa NR |
2876 | sha_dd_err: |
2877 | dev_err(dev, "initialization failed.\n"); | |
2878 | ||
2879 | return err; | |
2880 | } | |
2881 | ||
49cfe4db | 2882 | static int atmel_sha_remove(struct platform_device *pdev) |
ebc82efa NR |
2883 | { |
2884 | struct atmel_sha_dev *sha_dd; | 
2885 | ||
2886 | sha_dd = platform_get_drvdata(pdev); | |
2887 | if (!sha_dd) | |
2888 | return -ENODEV; | |
2889 | spin_lock(&atmel_sha.lock); | |
2890 | list_del(&sha_dd->list); | |
2891 | spin_unlock(&atmel_sha.lock); | |
2892 | ||
2893 | atmel_sha_unregister_algs(sha_dd); | |
2894 | ||
f56809c3 | 2895 | tasklet_kill(&sha_dd->queue_task); |
ebc82efa NR |
2896 | tasklet_kill(&sha_dd->done_task); |
2897 | ||
d4905b38 NR |
2898 | if (sha_dd->caps.has_dma) |
2899 | atmel_sha_dma_cleanup(sha_dd); | |
2900 | ||
c033042a CP |
2901 | clk_unprepare(sha_dd->iclk); |
2902 | ||
ebc82efa NR |
2903 | return 0; |
2904 | } | |
2905 | ||
2906 | static struct platform_driver atmel_sha_driver = { | |
2907 | .probe = atmel_sha_probe, | |
49cfe4db | 2908 | .remove = atmel_sha_remove, |
ebc82efa NR |
2909 | .driver = { |
2910 | .name = "atmel_sha", | |
abfe7ae4 | 2911 | .of_match_table = of_match_ptr(atmel_sha_dt_ids), |
ebc82efa NR |
2912 | }, |
2913 | }; | |
2914 | ||
2915 | module_platform_driver(atmel_sha_driver); | |
2916 | ||
d4905b38 | 2917 | MODULE_DESCRIPTION("Atmel SHA (1/224/256/384/512) hw acceleration support."); | 
ebc82efa NR |
2918 | MODULE_LICENSE("GPL v2"); |
2919 | MODULE_AUTHOR("Nicolas Royer - Eukréa Electromatique"); |