// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle async block requests with a crypto hardware engine.
 *
 * Copyright (C) 2016 Linaro, Inc.
 *
 * Author: Baolin Wang <[email protected]>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/akcipher.h>
#include <crypto/internal/engine.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/kpp.h>
#include <crypto/internal/skcipher.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <uapi/linux/sched/types.h>
#include "internal.h"

#define CRYPTO_ENGINE_MAX_QLEN 10

/* Temporary algorithm flag used to indicate an updated driver. */
#define CRYPTO_ALG_ENGINE 0x200

struct crypto_engine_alg {
	struct crypto_alg base;
	struct crypto_engine_op op;
};

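/*
 * Editorial note (illustrative): drivers are not expected to define a
 * crypto_engine_alg directly.  They register aead/ahash/akcipher/kpp/
 * skcipher algorithms through the crypto_engine_register_*() helpers at
 * the bottom of this file, which set CRYPTO_ALG_ENGINE on the base
 * algorithm so that crypto_pump_requests() can recover the
 * crypto_engine_op via container_of() when a request is dequeued.
 */
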
/**
 * crypto_finalize_request - finalize one request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
static void crypto_finalize_request(struct crypto_engine *engine,
				    struct crypto_async_request *req, int err)
{
	unsigned long flags;

	/*
	 * If the hardware cannot enqueue more requests
	 * and the retry mechanism is not supported,
	 * make sure we are completing the current request.
	 */
	if (!engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		if (engine->cur_req == req) {
			engine->cur_req = NULL;
		}
		spin_unlock_irqrestore(&engine->queue_lock, flags);
	}

	lockdep_assert_in_softirq();
	crypto_request_complete(req, err);

	kthread_queue_work(engine->kworker, &engine->pump_requests);
}

/**
 * crypto_pump_requests - dequeue one request from engine queue to process
 * @engine: the hardware engine
 * @in_kthread: true if we are in the context of the request pump thread
 *
 * This function checks if there is any request in the engine queue that
 * needs processing and, if so, calls out to the driver to initialize the
 * hardware and handle each request.
 */
static void crypto_pump_requests(struct crypto_engine *engine,
				 bool in_kthread)
{
	struct crypto_async_request *async_req, *backlog;
	struct crypto_engine_alg *alg;
	struct crypto_engine_op *op;
	unsigned long flags;
	bool was_busy = false;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/* Make sure we are not already running a request */
	if (!engine->retry_support && engine->cur_req)
		goto out;

	/* If another context is idling then defer */
	if (engine->idling) {
		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	/* Check if the engine queue is idle */
	if (!crypto_queue_len(&engine->queue) || !engine->running) {
		if (!engine->busy)
			goto out;

		/* Only do teardown in the thread */
		if (!in_kthread) {
			kthread_queue_work(engine->kworker,
					   &engine->pump_requests);
			goto out;
		}

		engine->busy = false;
		engine->idling = true;
		spin_unlock_irqrestore(&engine->queue_lock, flags);

		if (engine->unprepare_crypt_hardware &&
		    engine->unprepare_crypt_hardware(engine))
			dev_err(engine->dev, "failed to unprepare crypt hardware\n");

		spin_lock_irqsave(&engine->queue_lock, flags);
		engine->idling = false;
		goto out;
	}

start_request:
	/* Get the first request from the engine queue to handle */
	backlog = crypto_get_backlog(&engine->queue);
	async_req = crypto_dequeue_request(&engine->queue);
	if (!async_req)
		goto out;

	/*
	 * If the hardware doesn't support the retry mechanism,
	 * keep track of the request we are processing now.
	 * We'll need it on completion (crypto_finalize_request).
	 */
	if (!engine->retry_support)
		engine->cur_req = async_req;

	if (engine->busy)
		was_busy = true;
	else
		engine->busy = true;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/* At this point we have successfully dequeued a request to process */
	if (!was_busy && engine->prepare_crypt_hardware) {
		ret = engine->prepare_crypt_hardware(engine);
		if (ret) {
			dev_err(engine->dev, "failed to prepare crypt hardware\n");
			goto req_err_1;
		}
	}

	if (async_req->tfm->__crt_alg->cra_flags & CRYPTO_ALG_ENGINE) {
		alg = container_of(async_req->tfm->__crt_alg,
				   struct crypto_engine_alg, base);
		op = &alg->op;
	} else {
		dev_err(engine->dev, "failed to do request\n");
		ret = -EINVAL;
		goto req_err_1;
	}

	ret = op->do_one_request(engine, async_req);

	/* Request unsuccessfully executed by hardware */
	if (ret < 0) {
		/*
		 * If the hardware queue is full (-ENOSPC), requeue the
		 * request regardless of the backlog flag.
		 * Otherwise, unprepare and complete the request.
		 */
		if (!engine->retry_support ||
		    (ret != -ENOSPC)) {
			dev_err(engine->dev,
				"Failed to do one request from queue: %d\n",
				ret);
			goto req_err_1;
		}
		spin_lock_irqsave(&engine->queue_lock, flags);
		/*
		 * If the hardware was unable to execute the request, enqueue
		 * it back at the front of the crypto-engine queue, to keep
		 * the order of requests.
		 */
		crypto_enqueue_request_head(&engine->queue, async_req);

		kthread_queue_work(engine->kworker, &engine->pump_requests);
		goto out;
	}

	goto retry;

req_err_1:
	crypto_request_complete(async_req, ret);

retry:
	if (backlog)
		crypto_request_complete(backlog, -EINPROGRESS);

	/* If the retry mechanism is supported, send new requests to the engine */
	if (engine->retry_support) {
		spin_lock_irqsave(&engine->queue_lock, flags);
		goto start_request;
	}
	return;

out:
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	/*
	 * Batching requests is possible only if
	 * the hardware can enqueue multiple requests.
	 */
	if (engine->do_batch_requests) {
		ret = engine->do_batch_requests(engine);
		if (ret)
			dev_err(engine->dev, "failed to do batch requests: %d\n",
				ret);
	}

	return;
}

static void crypto_pump_work(struct kthread_work *work)
{
	struct crypto_engine *engine =
		container_of(work, struct crypto_engine, pump_requests);

	crypto_pump_requests(engine, true);
}

/**
 * crypto_transfer_request - transfer the new request into the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued on the engine
 * @need_pump: indicates whether to queue the request pump to kthread_work
 */
static int crypto_transfer_request(struct crypto_engine *engine,
				   struct crypto_async_request *req,
				   bool need_pump)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (!engine->running) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -ESHUTDOWN;
	}

	ret = crypto_enqueue_request(&engine->queue, req);

	if (!engine->busy && need_pump)
		kthread_queue_work(engine->kworker, &engine->pump_requests);

	spin_unlock_irqrestore(&engine->queue_lock, flags);
	return ret;
}

/**
 * crypto_transfer_request_to_engine - transfer one request onto the
 * engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued on the engine
 */
static int crypto_transfer_request_to_engine(struct crypto_engine *engine,
					     struct crypto_async_request *req)
{
	return crypto_transfer_request(engine, req, true);
}

/**
 * crypto_transfer_aead_request_to_engine - transfer one aead_request
 * onto the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued on the engine
 */
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
					   struct aead_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_aead_request_to_engine);

/**
 * crypto_transfer_akcipher_request_to_engine - transfer one akcipher_request
 * onto the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued on the engine
 */
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
					       struct akcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_akcipher_request_to_engine);

/**
 * crypto_transfer_hash_request_to_engine - transfer one ahash_request
 * onto the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued on the engine
 */
int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
					   struct ahash_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_hash_request_to_engine);

/**
 * crypto_transfer_kpp_request_to_engine - transfer one kpp_request onto
 * the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued on the engine
 */
int crypto_transfer_kpp_request_to_engine(struct crypto_engine *engine,
					  struct kpp_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_kpp_request_to_engine);

/**
 * crypto_transfer_skcipher_request_to_engine - transfer one skcipher_request
 * onto the engine queue
 * @engine: the hardware engine
 * @req: the request that needs to be queued on the engine
 */
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
					       struct skcipher_request *req)
{
	return crypto_transfer_request_to_engine(engine, &req->base);
}
EXPORT_SYMBOL_GPL(crypto_transfer_skcipher_request_to_engine);
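
/*
 * Illustrative sketch (editorial addition; the "my_*" names are
 * hypothetical): a driver's skcipher .encrypt/.decrypt callback usually
 * just hands the request over to its engine and lets
 * crypto_pump_requests() invoke the do_one_request() op later:
 *
 *	static int my_skcipher_encrypt(struct skcipher_request *req)
 *	{
 *		struct my_dev *dd = my_dev_get(req);
 *
 *		return crypto_transfer_skcipher_request_to_engine(dd->engine, req);
 *	}
 *
 * The usual return value is -EINPROGRESS (or -EBUSY if the request had to
 * be backlogged), which callers of the async API treat as a pending
 * operation.
 */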

/**
 * crypto_finalize_aead_request - finalize one aead_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_aead_request(struct crypto_engine *engine,
				  struct aead_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_aead_request);

/**
 * crypto_finalize_akcipher_request - finalize one akcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
				      struct akcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_akcipher_request);

/**
 * crypto_finalize_hash_request - finalize one ahash_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_hash_request(struct crypto_engine *engine,
				  struct ahash_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_hash_request);

/**
 * crypto_finalize_kpp_request - finalize one kpp_request if the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_kpp_request(struct crypto_engine *engine,
				 struct kpp_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_kpp_request);

/**
 * crypto_finalize_skcipher_request - finalize one skcipher_request if
 * the request is done
 * @engine: the hardware engine
 * @req: the request that needs to be finalized
 * @err: error number
 */
void crypto_finalize_skcipher_request(struct crypto_engine *engine,
				      struct skcipher_request *req, int err)
{
	return crypto_finalize_request(engine, &req->base, err);
}
EXPORT_SYMBOL_GPL(crypto_finalize_skcipher_request);
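
/*
 * Illustrative sketch (editorial addition; the "my_*" names are
 * hypothetical): once the hardware has finished a request, the driver's
 * completion path (e.g. a tasklet run from its interrupt handler, given
 * the lockdep_assert_in_softirq() above) reports the result, which
 * completes the request and kicks the pump for the next one:
 *
 *	static void my_done_task(struct my_dev *dd, int err)
 *	{
 *		struct skcipher_request *req = dd->cur_req;
 *
 *		crypto_finalize_skcipher_request(dd->engine, req, err);
 *	}
 */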

/**
 * crypto_engine_start - start the hardware engine
 * @engine: the hardware engine that needs to be started
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_start(struct crypto_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&engine->queue_lock, flags);

	if (engine->running || engine->busy) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		return -EBUSY;
	}

	engine->running = true;
	spin_unlock_irqrestore(&engine->queue_lock, flags);

	kthread_queue_work(engine->kworker, &engine->pump_requests);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_start);

/**
 * crypto_engine_stop - stop the hardware engine
 * @engine: the hardware engine that needs to be stopped
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_stop(struct crypto_engine *engine)
{
	unsigned long flags;
	unsigned int limit = 500;
	int ret = 0;

	spin_lock_irqsave(&engine->queue_lock, flags);

	/*
	 * If the engine queue is not empty or the engine is busy, we need to
	 * wait for a while so that the queued requests can be pumped.
	 */
	while ((crypto_queue_len(&engine->queue) || engine->busy) && limit--) {
		spin_unlock_irqrestore(&engine->queue_lock, flags);
		msleep(20);
		spin_lock_irqsave(&engine->queue_lock, flags);
	}

	if (crypto_queue_len(&engine->queue) || engine->busy)
		ret = -EBUSY;
	else
		engine->running = false;

	spin_unlock_irqrestore(&engine->queue_lock, flags);

	if (ret)
		dev_warn(engine->dev, "could not stop engine\n");

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_stop);

/**
 * crypto_engine_alloc_init_and_set - allocate crypto hardware engine structure
 * and initialize it by setting the maximum number of entries in the software
 * crypto-engine queue.
 * @dev: the device attached to the hardware engine
 * @retry_support: whether the hardware supports the retry mechanism
 * @cbk_do_batch: pointer to a callback function to be invoked when executing
 *                a batch of requests.
 *                This has the form:
 *                callback(struct crypto_engine *engine)
 *                where:
 *                engine: the crypto engine structure.
 * @rt: whether this queue is set to run as a realtime task
 * @qlen: maximum size of the crypto-engine queue
 *
 * This must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
						       bool retry_support,
						       int (*cbk_do_batch)(struct crypto_engine *engine),
						       bool rt, int qlen)
{
	struct crypto_engine *engine;

	if (!dev)
		return NULL;

	engine = devm_kzalloc(dev, sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return NULL;

	engine->dev = dev;
	engine->rt = rt;
	engine->running = false;
	engine->busy = false;
	engine->idling = false;
	engine->retry_support = retry_support;
	engine->priv_data = dev;
	/*
	 * Batching requests is possible only if
	 * the hardware supports the retry mechanism.
	 */
	engine->do_batch_requests = retry_support ? cbk_do_batch : NULL;

	snprintf(engine->name, sizeof(engine->name),
		 "%s-engine", dev_name(dev));

	crypto_init_queue(&engine->queue, qlen);
	spin_lock_init(&engine->queue_lock);

	engine->kworker = kthread_create_worker(0, "%s", engine->name);
	if (IS_ERR(engine->kworker)) {
		dev_err(dev, "failed to create crypto request pump task\n");
		return NULL;
	}
	kthread_init_work(&engine->pump_requests, crypto_pump_work);

	if (engine->rt) {
		dev_info(dev, "will run requests pump with realtime priority\n");
		sched_set_fifo(engine->kworker->task);
	}

	return engine;
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init_and_set);

/**
 * crypto_engine_alloc_init - allocate crypto hardware engine structure and
 * initialize it.
 * @dev: the device attached to the hardware engine
 * @rt: whether this queue is set to run as a realtime task
 *
 * This must be called from a context that can sleep.
 * Return: the crypto engine structure on success, else NULL.
 */
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt)
{
	return crypto_engine_alloc_init_and_set(dev, false, NULL, rt,
						CRYPTO_ENGINE_MAX_QLEN);
}
EXPORT_SYMBOL_GPL(crypto_engine_alloc_init);

/**
 * crypto_engine_exit - free the resources of the hardware engine on exit
 * @engine: the hardware engine that needs to be freed
 *
 * Return 0 on success, else a negative error code.
 */
int crypto_engine_exit(struct crypto_engine *engine)
{
	int ret;

	ret = crypto_engine_stop(engine);
	if (ret)
		return ret;

	kthread_destroy_worker(engine->kworker);

	return 0;
}
EXPORT_SYMBOL_GPL(crypto_engine_exit);

int crypto_engine_register_aead(struct aead_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aead);

void crypto_engine_unregister_aead(struct aead_engine_alg *alg)
{
	crypto_unregister_aead(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aead);

int crypto_engine_register_aeads(struct aead_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_aead(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_aeads(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_aeads);

void crypto_engine_unregister_aeads(struct aead_engine_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_aead(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_aeads);

int crypto_engine_register_ahash(struct ahash_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.halg.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahash);

void crypto_engine_unregister_ahash(struct ahash_engine_alg *alg)
{
	crypto_unregister_ahash(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahash);

int crypto_engine_register_ahashes(struct ahash_engine_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_ahashes(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_ahashes);

void crypto_engine_unregister_ahashes(struct ahash_engine_alg *algs,
				      int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_ahashes);

int crypto_engine_register_akcipher(struct akcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_akcipher);

void crypto_engine_unregister_akcipher(struct akcipher_engine_alg *alg)
{
	crypto_unregister_akcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_akcipher);

int crypto_engine_register_kpp(struct kpp_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_kpp);

void crypto_engine_unregister_kpp(struct kpp_engine_alg *alg)
{
	crypto_unregister_kpp(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_kpp);

int crypto_engine_register_skcipher(struct skcipher_engine_alg *alg)
{
	if (!alg->op.do_one_request)
		return -EINVAL;

	alg->base.base.cra_flags |= CRYPTO_ALG_ENGINE;

	return crypto_register_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skcipher);

void crypto_engine_unregister_skcipher(struct skcipher_engine_alg *alg)
{
	return crypto_unregister_skcipher(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skcipher);

int crypto_engine_register_skciphers(struct skcipher_engine_alg *algs,
				     int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_engine_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	crypto_engine_unregister_skciphers(algs, i);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_engine_register_skciphers);

void crypto_engine_unregister_skciphers(struct skcipher_engine_alg *algs,
					int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_engine_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_engine_unregister_skciphers);
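
/*
 * Illustrative sketch (editorial addition; the "my_*" names are
 * hypothetical): an updated driver pairs each algorithm with its engine op
 * and registers them through the helpers above, which set
 * CRYPTO_ALG_ENGINE on its behalf:
 *
 *	static struct skcipher_engine_alg my_cbc_aes_alg = {
 *		.base = {
 *			... the usual skcipher_alg fields: cra_name,
 *			    setkey, encrypt, decrypt, ...
 *		},
 *		.op = {
 *			.do_one_request = my_do_one_request,
 *		},
 *	};
 *
 *	err = crypto_engine_register_skcipher(&my_cbc_aes_alg);
 *
 * my_do_one_request(engine, areq) is expected to start the hardware and
 * return 0 (the result is reported later through
 * crypto_finalize_skcipher_request()) or a negative error code; with
 * retry support, returning -ENOSPC asks the engine to requeue the request.
 */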

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Crypto hardware engine framework");