// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2016-2017 Hisilicon Limited. */
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <crypto/skcipher.h>
#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

#include "sec_drv.h"

#define SEC_MAX_CIPHER_KEY		64
#define SEC_REQ_LIMIT			SZ_32M

struct sec_c_alg_cfg {
	unsigned c_alg		: 3;
	unsigned c_mode		: 3;
	unsigned key_len	: 2;
	unsigned c_width	: 2;
};

static const struct sec_c_alg_cfg sec_c_alg_cfgs[] = {
	[SEC_C_DES_ECB_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_DES_CBC_64] = {
		.c_alg = SEC_C_ALG_DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_DES,
	},
	[SEC_C_3DES_ECB_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_ECB_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_3DES_CBC_192_3KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_3_KEY,
	},
	[SEC_C_3DES_CBC_192_2KEY] = {
		.c_alg = SEC_C_ALG_3DES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_3DES_2_KEY,
	},
	[SEC_C_AES_ECB_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_ECB_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_ECB_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_ECB,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CBC_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CBC_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CBC_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CBC,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_CTR_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_CTR_192] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_192,
	},
	[SEC_C_AES_CTR_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_CTR,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_AES_XTS_128] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_128,
	},
	[SEC_C_AES_XTS_256] = {
		.c_alg = SEC_C_ALG_AES,
		.c_mode = SEC_C_MODE_XTS,
		.key_len = SEC_KEY_LEN_AES_256,
	},
	[SEC_C_NULL] = {
	},
};

/*
 * Mutex used to protect the reference count of registered
 * algorithm providers.
 */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

static void sec_alg_skcipher_init_template(struct sec_alg_tfm_ctx *ctx,
					   struct sec_bd_info *req,
					   enum sec_cipher_alg alg)
{
	const struct sec_c_alg_cfg *cfg = &sec_c_alg_cfgs[alg];

	memset(req, 0, sizeof(*req));
	req->w0 |= cfg->c_mode << SEC_BD_W0_C_MODE_S;
	req->w1 |= cfg->c_alg << SEC_BD_W1_C_ALG_S;
	req->w3 |= cfg->key_len << SEC_BD_W3_C_KEY_LEN_S;
	req->w0 |= cfg->c_width << SEC_BD_W0_C_WIDTH_S;

	req->cipher_key_addr_lo = lower_32_bits(ctx->pkey);
	req->cipher_key_addr_hi = upper_32_bits(ctx->pkey);
}
141 | ||
142 | static void sec_alg_skcipher_init_context(struct crypto_skcipher *atfm, | |
143 | const u8 *key, | |
144 | unsigned int keylen, | |
145 | enum sec_cipher_alg alg) | |
146 | { | |
147 | struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm); | |
148 | struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm); | |
149 | ||
150 | ctx->cipher_alg = alg; | |
151 | memcpy(ctx->key, key, keylen); | |
152 | sec_alg_skcipher_init_template(ctx, &ctx->req_template, | |
153 | ctx->cipher_alg); | |
154 | } | |
155 | ||
static int sec_alloc_and_fill_hw_sgl(struct sec_hw_sgl **sec_sgl,
				     dma_addr_t *psec_sgl,
				     struct scatterlist *sgl,
				     int count,
				     struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current = NULL;
	struct sec_hw_sgl *sgl_next;
	dma_addr_t sgl_next_dma;
	dma_addr_t sgl_current_dma;
	struct scatterlist *sg;
	int ret, sge_index, i;

	if (!count)
		return -EINVAL;

	for_each_sg(sgl, sg, count, i) {
		sge_index = i % SEC_MAX_SGE_NUM;
		if (sge_index == 0) {
			sgl_next = dma_pool_zalloc(info->hw_sgl_pool,
						   GFP_KERNEL, &sgl_next_dma);
			if (!sgl_next) {
				ret = -ENOMEM;
				goto err_free_hw_sgls;
			}

			if (!sgl_current) { /* First one */
				*psec_sgl = sgl_next_dma;
				*sec_sgl = sgl_next;
			} else { /* Chained */
				sgl_current->entry_sum_in_sgl = SEC_MAX_SGE_NUM;
				sgl_current->next_sgl = sgl_next_dma;
				sgl_current->next = sgl_next;
			}
			sgl_current = sgl_next;
		}
		sgl_current->sge_entries[sge_index].buf = sg_dma_address(sg);
		sgl_current->sge_entries[sge_index].len = sg_dma_len(sg);
		sgl_current->data_bytes_in_sgl += sg_dma_len(sg);
	}
	/* The last SGL holds SEC_MAX_SGE_NUM entries when count is a multiple */
	sgl_current->entry_sum_in_sgl = count % SEC_MAX_SGE_NUM ?:
					SEC_MAX_SGE_NUM;
	sgl_current->next_sgl = 0;
	(*sec_sgl)->entry_sum_in_chain = count;

	return 0;

err_free_hw_sgls:
	/* Free each element with the DMA handle it was allocated with */
	sgl_current = *sec_sgl;
	sgl_current_dma = *psec_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;
		dma_pool_free(info->hw_sgl_pool, sgl_current, sgl_current_dma);
		sgl_current = sgl_next;
		sgl_current_dma = sgl_next_dma;
	}
	*psec_sgl = 0;

	return ret;
}

static void sec_free_hw_sgl(struct sec_hw_sgl *hw_sgl,
			    dma_addr_t psec_sgl, struct sec_dev_info *info)
{
	struct sec_hw_sgl *sgl_current, *sgl_next;
	dma_addr_t sgl_next_dma;

	sgl_current = hw_sgl;
	while (sgl_current) {
		sgl_next = sgl_current->next;
		sgl_next_dma = sgl_current->next_sgl;

		/* Each element must be freed with its own DMA handle */
		dma_pool_free(info->hw_sgl_pool, sgl_current, psec_sgl);

		sgl_current = sgl_next;
		psec_sgl = sgl_next_dma;
	}
}

static int sec_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   enum sec_cipher_alg alg)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	mutex_lock(&ctx->lock);
	if (ctx->key) {
		/* rekeying */
		memset(ctx->key, 0, SEC_MAX_CIPHER_KEY);
	} else {
		/* new key */
		ctx->key = dma_zalloc_coherent(dev, SEC_MAX_CIPHER_KEY,
					       &ctx->pkey, GFP_KERNEL);
		if (!ctx->key) {
			mutex_unlock(&ctx->lock);
			return -ENOMEM;
		}
	}
	mutex_unlock(&ctx->lock);
	sec_alg_skcipher_init_context(tfm, key, keylen, alg);

	return 0;
}

static int sec_alg_skcipher_setkey_aes_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_ECB_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_ECB_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_ECB_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CBC_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CBC_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CBC_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_ctr(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;

	switch (keylen) {
	case AES_KEYSIZE_128:
		alg = SEC_C_AES_CTR_128;
		break;
	case AES_KEYSIZE_192:
		alg = SEC_C_AES_CTR_192;
		break;
	case AES_KEYSIZE_256:
		alg = SEC_C_AES_CTR_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_aes_xts(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	enum sec_cipher_alg alg;
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	switch (keylen) {
	case AES_KEYSIZE_128 * 2:
		alg = SEC_C_AES_XTS_128;
		break;
	case AES_KEYSIZE_256 * 2:
		alg = SEC_C_AES_XTS_256;
		break;
	default:
		return -EINVAL;
	}

	return sec_alg_skcipher_setkey(tfm, key, keylen, alg);
}

static int sec_alg_skcipher_setkey_des_ecb(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_ECB_64);
}

static int sec_alg_skcipher_setkey_des_cbc(struct crypto_skcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen, SEC_C_DES_CBC_64);
}

static int sec_alg_skcipher_setkey_3des_ecb(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	if (keylen != DES_KEY_SIZE * 3)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_ECB_192_3KEY);
}

static int sec_alg_skcipher_setkey_3des_cbc(struct crypto_skcipher *tfm,
					    const u8 *key, unsigned int keylen)
{
	if (keylen != DES3_EDE_KEY_SIZE)
		return -EINVAL;

	return sec_alg_skcipher_setkey(tfm, key, keylen,
				       SEC_C_3DES_CBC_192_3KEY);
}

static void sec_alg_free_el(struct sec_request_el *el,
			    struct sec_dev_info *info)
{
	sec_free_hw_sgl(el->out, el->dma_out, info);
	sec_free_hw_sgl(el->in, el->dma_in, info);
	kfree(el->sgl_in);
	kfree(el->sgl_out);
	kfree(el);
}

/*
 * Send the elements of a request to the hardware queue, falling back
 * to the software queue when ordering against in-flight chained
 * requests must be preserved. queuelock must be held by the caller.
 */
int sec_send_request(struct sec_request *sec_req, struct sec_queue *queue)
{
	struct sec_request_el *el, *temp;
	int ret = 0;

	mutex_lock(&sec_req->lock);
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		/*
		 * Add to the hardware queue only under the following
		 * circumstances:
		 * 1) Software and hardware queue empty so no chain dependencies
		 * 2) No dependencies as new IV - (check software queue empty
		 *    to maintain order)
		 * 3) No dependencies because the mode does no chaining.
		 *
		 * In other cases first insert onto the software queue,
		 * which is then emptied as requests complete.
		 */
		if (!queue->havesoftqueue ||
		    (kfifo_is_empty(&queue->softqueue) &&
		     sec_queue_empty(queue))) {
			ret = sec_queue_send(queue, &el->req, sec_req);
			if (ret == -EAGAIN) {
				/* Wait until we can send then try again */
				/* DEAD if here - should not happen */
				ret = -EBUSY;
				goto err_unlock;
			}
		} else {
			kfifo_put(&queue->softqueue, el);
		}
	}
err_unlock:
	mutex_unlock(&sec_req->lock);

	return ret;
}

static void sec_skcipher_alg_callback(struct sec_bd_info *sec_resp,
				      struct crypto_async_request *req_base)
{
	struct skcipher_request *skreq = container_of(req_base,
						      struct skcipher_request,
						      base);
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_request *backlog_req;
	struct sec_request_el *sec_req_el, *nextrequest;
	struct sec_alg_tfm_ctx *ctx = sec_req->tfm_ctx;
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct device *dev = ctx->queue->dev_info->dev;
	int icv_or_skey_en, ret;
	bool done;

	sec_req_el = list_first_entry(&sec_req->elements, struct sec_request_el,
				      head);
	icv_or_skey_en = (sec_resp->w0 & SEC_BD_W0_ICV_OR_SKEY_EN_M) >>
		SEC_BD_W0_ICV_OR_SKEY_EN_S;
	if (sec_resp->w1 & SEC_BD_W1_BD_INVALID || icv_or_skey_en == 3) {
		dev_err(dev, "Got an invalid answer %lu %d\n",
			sec_resp->w1 & SEC_BD_W1_BD_INVALID,
			icv_or_skey_en);
		sec_req->err = -EINVAL;
		/*
		 * We need to muddle on to avoid getting stuck with elements
		 * on the queue. The error will be reported to the requester,
		 * which should be able to handle it appropriately.
		 */
	}

	mutex_lock(&ctx->queue->queuelock);
	/* Put the IV in place for chained cases */
	switch (ctx->cipher_alg) {
	case SEC_C_AES_CBC_128:
	case SEC_C_AES_CBC_192:
	case SEC_C_AES_CBC_256:
		if (sec_req_el->req.w0 & SEC_BD_W0_DE)
			sg_pcopy_to_buffer(sec_req_el->sgl_out,
					   sg_nents(sec_req_el->sgl_out),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		else
			sg_pcopy_to_buffer(sec_req_el->sgl_in,
					   sg_nents(sec_req_el->sgl_in),
					   skreq->iv,
					   crypto_skcipher_ivsize(atfm),
					   sec_req_el->el_length -
					   crypto_skcipher_ivsize(atfm));
		/* No need to sync to the device as coherent DMA */
		break;
	case SEC_C_AES_CTR_128:
	case SEC_C_AES_CTR_192:
	case SEC_C_AES_CTR_256:
		crypto_inc(skreq->iv, 16);
		break;
	default:
		/* Do not update */
		break;
	}

	if (ctx->queue->havesoftqueue &&
	    !kfifo_is_empty(&ctx->queue->softqueue) &&
	    sec_queue_empty(ctx->queue)) {
		ret = kfifo_get(&ctx->queue->softqueue, &nextrequest);
		if (ret <= 0)
			dev_err(dev,
				"Error getting next element from kfifo %d\n",
				ret);
		else
			/* We know there is space so this cannot fail */
			sec_queue_send(ctx->queue, &nextrequest->req,
				       nextrequest->sec_req);
	} else if (!list_empty(&ctx->backlog)) {
		/* Need to verify there is room first */
		backlog_req = list_first_entry(&ctx->backlog,
					       typeof(*backlog_req),
					       backlog_head);
		if (sec_queue_can_enqueue(ctx->queue,
					  backlog_req->num_elements) ||
		    (ctx->queue->havesoftqueue &&
		     kfifo_avail(&ctx->queue->softqueue) >
		     backlog_req->num_elements)) {
			sec_send_request(backlog_req, ctx->queue);
			backlog_req->req_base->complete(backlog_req->req_base,
							-EINPROGRESS);
			list_del(&backlog_req->backlog_head);
		}
	}
	mutex_unlock(&ctx->queue->queuelock);

	mutex_lock(&sec_req->lock);
	list_del(&sec_req_el->head);
	mutex_unlock(&sec_req->lock);
	sec_alg_free_el(sec_req_el, ctx->queue->dev_info);

	/*
	 * Request is done. The locking dance is needed as the completion
	 * may free sec_req (and with it the lock), so the lock cannot be
	 * held across the completion call.
	 */
	mutex_lock(&sec_req->lock);
	done = list_empty(&sec_req->elements);
	mutex_unlock(&sec_req->lock);
	if (done) {
		if (crypto_skcipher_ivsize(atfm)) {
			dma_unmap_single(dev, sec_req->dma_iv,
					 crypto_skcipher_ivsize(atfm),
					 DMA_TO_DEVICE);
		}
		dma_unmap_sg(dev, skreq->src, sec_req->len_in,
			     DMA_BIDIRECTIONAL);
		if (skreq->src != skreq->dst)
			dma_unmap_sg(dev, skreq->dst, sec_req->len_out,
				     DMA_BIDIRECTIONAL);
		skreq->base.complete(&skreq->base, sec_req->err);
	}
}

void sec_alg_callback(struct sec_bd_info *resp, void *shadow)
{
	struct sec_request *sec_req = shadow;

	sec_req->cb(resp, sec_req->req_base);
}

static int sec_alg_alloc_and_calc_split_sizes(int length, size_t **split_sizes,
					      int *steps)
{
	size_t *sizes;
	int i;

	/* Split into suitable sized blocks */
	*steps = roundup(length, SEC_REQ_LIMIT) / SEC_REQ_LIMIT;
	sizes = kcalloc(*steps, sizeof(*sizes), GFP_KERNEL);
	if (!sizes)
		return -ENOMEM;

	for (i = 0; i < *steps - 1; i++)
		sizes[i] = SEC_REQ_LIMIT;
	sizes[*steps - 1] = length - SEC_REQ_LIMIT * (*steps - 1);
	*split_sizes = sizes;

	return 0;
}

static int sec_map_and_split_sg(struct scatterlist *sgl, size_t *split_sizes,
				int steps, struct scatterlist ***splits,
				int **splits_nents,
				int sgl_len_in,
				struct device *dev)
{
	int ret, count;

	count = dma_map_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
	if (!count)
		return -EINVAL;

	*splits = kcalloc(steps, sizeof(struct scatterlist *), GFP_KERNEL);
	if (!*splits) {
		ret = -ENOMEM;
		goto err_unmap_sg;
	}
	*splits_nents = kcalloc(steps, sizeof(int), GFP_KERNEL);
	if (!*splits_nents) {
		ret = -ENOMEM;
		goto err_free_splits;
	}

	/* Split the mapped scatterlist into steps chunks of split_sizes bytes */
	ret = sg_split(sgl, count, 0, steps, split_sizes,
		       *splits, *splits_nents, GFP_KERNEL);
	if (ret) {
		ret = -ENOMEM;
		goto err_free_splits_nents;
	}

	return 0;

err_free_splits_nents:
	kfree(*splits_nents);
err_free_splits:
	kfree(*splits);
err_unmap_sg:
	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);

	return ret;
}

/*
 * Reverses the sec_map_and_split_sg call for messages not yet added to
 * the queues.
 */
static void sec_unmap_sg_on_err(struct scatterlist *sgl, int steps,
				struct scatterlist **splits, int *splits_nents,
				int sgl_len_in, struct device *dev)
{
	int i;

	for (i = 0; i < steps; i++)
		kfree(splits[i]);
	kfree(splits_nents);
	kfree(splits);

	dma_unmap_sg(dev, sgl, sgl_len_in, DMA_BIDIRECTIONAL);
}

static struct sec_request_el
*sec_alg_alloc_and_fill_el(struct sec_bd_info *template, int encrypt,
			   int el_size, bool different_dest,
			   struct scatterlist *sgl_in, int n_ents_in,
			   struct scatterlist *sgl_out, int n_ents_out,
			   struct sec_dev_info *info)
{
	struct sec_request_el *el;
	struct sec_bd_info *req;
	int ret;

	el = kzalloc(sizeof(*el), GFP_KERNEL);
	if (!el)
		return ERR_PTR(-ENOMEM);
	el->el_length = el_size;
	req = &el->req;
	memcpy(req, template, sizeof(*req));

	req->w0 &= ~SEC_BD_W0_CIPHER_M;
	if (encrypt)
		req->w0 |= SEC_CIPHER_ENCRYPT << SEC_BD_W0_CIPHER_S;
	else
		req->w0 |= SEC_CIPHER_DECRYPT << SEC_BD_W0_CIPHER_S;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_19_16_M;
	req->w0 |= ((el_size >> 16) << SEC_BD_W0_C_GRAN_SIZE_19_16_S) &
		SEC_BD_W0_C_GRAN_SIZE_19_16_M;

	req->w0 &= ~SEC_BD_W0_C_GRAN_SIZE_21_20_M;
	req->w0 |= ((el_size >> 20) << SEC_BD_W0_C_GRAN_SIZE_21_20_S) &
		SEC_BD_W0_C_GRAN_SIZE_21_20_M;

	/* Writing whole u32 so no need to take care of masking */
	req->w2 = ((1 << SEC_BD_W2_GRAN_NUM_S) & SEC_BD_W2_GRAN_NUM_M) |
		((el_size << SEC_BD_W2_C_GRAN_SIZE_15_0_S) &
		 SEC_BD_W2_C_GRAN_SIZE_15_0_M);

	req->w3 &= ~SEC_BD_W3_CIPHER_LEN_OFFSET_M;
	req->w1 |= SEC_BD_W1_ADDR_TYPE;

	el->sgl_in = sgl_in;

	ret = sec_alloc_and_fill_hw_sgl(&el->in, &el->dma_in, el->sgl_in,
					n_ents_in, info);
	if (ret)
		goto err_free_el;

	req->data_addr_lo = lower_32_bits(el->dma_in);
	req->data_addr_hi = upper_32_bits(el->dma_in);

	if (different_dest) {
		el->sgl_out = sgl_out;
		ret = sec_alloc_and_fill_hw_sgl(&el->out, &el->dma_out,
						el->sgl_out,
						n_ents_out, info);
		if (ret)
			goto err_free_hw_sgl_in;

		req->w0 |= SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_out);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_out);

	} else {
		req->w0 &= ~SEC_BD_W0_DE;
		req->cipher_destin_addr_lo = lower_32_bits(el->dma_in);
		req->cipher_destin_addr_hi = upper_32_bits(el->dma_in);
	}

	return el;

err_free_hw_sgl_in:
	sec_free_hw_sgl(el->in, el->dma_in, info);
err_free_el:
	kfree(el);

	return ERR_PTR(ret);
}

static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
				   bool encrypt)
{
	struct crypto_skcipher *atfm = crypto_skcipher_reqtfm(skreq);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(atfm);
	struct sec_alg_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
	struct sec_queue *queue = ctx->queue;
	struct sec_request *sec_req = skcipher_request_ctx(skreq);
	struct sec_dev_info *info = queue->dev_info;
	int i, ret, steps;
	size_t *split_sizes;
	struct scatterlist **splits_in;
	struct scatterlist **splits_out = NULL;
	int *splits_in_nents;
	int *splits_out_nents = NULL;
	struct sec_request_el *el, *temp;

	mutex_init(&sec_req->lock);
	sec_req->req_base = &skreq->base;
	sec_req->err = 0;
	/* SGL mapping out here to allow us to break it up as necessary */
	sec_req->len_in = sg_nents(skreq->src);

	ret = sec_alg_alloc_and_calc_split_sizes(skreq->cryptlen, &split_sizes,
						 &steps);
	if (ret)
		return ret;
	sec_req->num_elements = steps;
	ret = sec_map_and_split_sg(skreq->src, split_sizes, steps, &splits_in,
				   &splits_in_nents, sec_req->len_in,
				   info->dev);
	if (ret)
		goto err_free_split_sizes;

	if (skreq->src != skreq->dst) {
		sec_req->len_out = sg_nents(skreq->dst);
		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
					   &splits_out, &splits_out_nents,
					   sec_req->len_out, info->dev);
		if (ret)
			goto err_unmap_in_sg;
	}
	/* Shared info stored in sec_req - applies to all BDs */
	sec_req->tfm_ctx = ctx;
	sec_req->cb = sec_skcipher_alg_callback;
	INIT_LIST_HEAD(&sec_req->elements);

	/*
	 * Future optimization.
	 * In the chaining case we can't use a dma pool bounce buffer
	 * but in the case where we know there is no chaining we can
	 */
	if (crypto_skcipher_ivsize(atfm)) {
		sec_req->dma_iv = dma_map_single(info->dev, skreq->iv,
						 crypto_skcipher_ivsize(atfm),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(info->dev, sec_req->dma_iv)) {
			ret = -ENOMEM;
			goto err_unmap_out_sg;
		}
	}

	/* Set them all up then queue - cleaner error handling. */
	for (i = 0; i < steps; i++) {
		/* The output arrays only exist when src and dst differ */
		el = sec_alg_alloc_and_fill_el(&ctx->req_template,
					       encrypt ? 1 : 0,
					       split_sizes[i],
					       skreq->src != skreq->dst,
					       splits_in[i], splits_in_nents[i],
					       splits_out ? splits_out[i] : NULL,
					       splits_out_nents ?
						       splits_out_nents[i] : 0,
					       info);
		if (IS_ERR(el)) {
			ret = PTR_ERR(el);
			goto err_free_elements;
		}
		el->req.cipher_iv_addr_lo = lower_32_bits(sec_req->dma_iv);
		el->req.cipher_iv_addr_hi = upper_32_bits(sec_req->dma_iv);
		el->sec_req = sec_req;
		list_add_tail(&el->head, &sec_req->elements);
	}

	/*
	 * Only attempt to queue if the whole lot can fit in the queue -
	 * we can't successfully clean up after a partial queueing so this
	 * must succeed or fail atomically.
	 *
	 * Big hammer test of both software and hardware queues - could be
	 * more refined but this is unlikely to happen so no need.
	 */

	/* Grab a big lock for a long time to avoid concurrency issues */
	mutex_lock(&queue->queuelock);

	/*
	 * Can go on to queue if we have space in either:
	 * 1) The hardware queue and no software queue
	 * 2) The software queue
	 * AND there is nothing in the backlog. If there is backlog we
	 * have to only queue to the backlog queue and return busy.
	 */
	if ((!sec_queue_can_enqueue(queue, steps) &&
	     (!queue->havesoftqueue ||
	      kfifo_avail(&queue->softqueue) < steps)) ||
	    !list_empty(&ctx->backlog)) {
		ret = -EBUSY;
		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
			mutex_unlock(&queue->queuelock);
			goto out;
		}

		mutex_unlock(&queue->queuelock);
		goto err_free_elements;
	}
	ret = sec_send_request(sec_req, queue);
	mutex_unlock(&queue->queuelock);
	if (ret)
		goto err_free_elements;

	ret = -EINPROGRESS;
out:
	/* Cleanup - all elements in pointer arrays have been copied */
	kfree(splits_in_nents);
	kfree(splits_in);
	kfree(splits_out_nents);
	kfree(splits_out);
	kfree(split_sizes);

	return ret;

err_free_elements:
	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
		list_del(&el->head);
		sec_alg_free_el(el, info);
	}
	if (crypto_skcipher_ivsize(atfm))
		dma_unmap_single(info->dev, sec_req->dma_iv,
				 crypto_skcipher_ivsize(atfm),
				 DMA_TO_DEVICE);
err_unmap_out_sg:
	if (skreq->src != skreq->dst)
		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
				    splits_out_nents, sec_req->len_out,
				    info->dev);
err_unmap_in_sg:
	sec_unmap_sg_on_err(skreq->src, steps, splits_in, splits_in_nents,
			    sec_req->len_in, info->dev);
err_free_split_sizes:
	kfree(split_sizes);

	return ret;
}

static int sec_alg_skcipher_encrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, true);
}

static int sec_alg_skcipher_decrypt(struct skcipher_request *req)
{
	return sec_alg_skcipher_crypto(req, false);
}

static int sec_alg_skcipher_init(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	mutex_init(&ctx->lock);
	INIT_LIST_HEAD(&ctx->backlog);
	crypto_skcipher_set_reqsize(tfm, sizeof(struct sec_request));

	ctx->queue = sec_queue_alloc_start_safe();
	if (IS_ERR(ctx->queue))
		return PTR_ERR(ctx->queue);

	mutex_init(&ctx->queue->queuelock);
	ctx->queue->havesoftqueue = false;

	return 0;
}

static void sec_alg_skcipher_exit(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct device *dev = ctx->queue->dev_info->dev;

	if (ctx->key) {
		memzero_explicit(ctx->key, SEC_MAX_CIPHER_KEY);
		dma_free_coherent(dev, SEC_MAX_CIPHER_KEY, ctx->key,
				  ctx->pkey);
	}
	sec_queue_stop_release(ctx->queue);
}

static int sec_alg_skcipher_init_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = sec_alg_skcipher_init(tfm);
	if (ret)
		return ret;

	INIT_KFIFO(ctx->queue->softqueue);
	ret = kfifo_alloc(&ctx->queue->softqueue, 512, GFP_KERNEL);
	if (ret) {
		sec_alg_skcipher_exit(tfm);
		return ret;
	}
	ctx->queue->havesoftqueue = true;

	return 0;
}

static void sec_alg_skcipher_exit_with_queue(struct crypto_skcipher *tfm)
{
	struct sec_alg_tfm_ctx *ctx = crypto_skcipher_ctx(tfm);

	kfifo_free(&ctx->queue->softqueue);
	sec_alg_skcipher_exit(tfm);
}

static struct skcipher_alg sec_algs[] = {
	{
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "hisi_sec_aes_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "hisi_sec_aes_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "hisi_sec_aes_ctr",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_aes_ctr,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "xts(aes)",
			.cra_driver_name = "hisi_sec_aes_xts",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_aes_xts,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = 2 * AES_MIN_KEY_SIZE,
		.max_keysize = 2 * AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
	}, {
	/* Unable to find any test vectors so untested */
		.base = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "hisi_sec_des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = 0,
	}, {
		.base = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "hisi_sec_des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES_KEY_SIZE,
		.max_keysize = DES_KEY_SIZE,
		.ivsize = DES_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_cbc",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init_with_queue,
		.exit = sec_alg_skcipher_exit_with_queue,
		.setkey = sec_alg_skcipher_setkey_3des_cbc,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = DES3_EDE_BLOCK_SIZE,
	}, {
		.base = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "hisi_sec_3des_ecb",
			.cra_priority = 4001,
			.cra_flags = CRYPTO_ALG_ASYNC,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct sec_alg_tfm_ctx),
			.cra_alignmask = 0,
			.cra_module = THIS_MODULE,
		},
		.init = sec_alg_skcipher_init,
		.exit = sec_alg_skcipher_exit,
		.setkey = sec_alg_skcipher_setkey_3des_ecb,
		.decrypt = sec_alg_skcipher_decrypt,
		.encrypt = sec_alg_skcipher_encrypt,
		.min_keysize = DES3_EDE_KEY_SIZE,
		.max_keysize = DES3_EDE_KEY_SIZE,
		.ivsize = 0,
	}
};

int sec_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(sec_algs, ARRAY_SIZE(sec_algs));
	if (ret)
		--active_devs;
unlock:
	mutex_unlock(&algs_lock);

	return ret;
}

void sec_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;
	crypto_unregister_skciphers(sec_algs, ARRAY_SIZE(sec_algs));

unlock:
	mutex_unlock(&algs_lock);
}