/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/skbuff.h>

struct crypto_aead;
struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);
	void (*show)(struct seq_file *m, struct crypto_alg *alg);
	int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
	struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
	void (*free)(struct crypto_instance *inst);

	unsigned int type;
	unsigned int maskclear;
	unsigned int maskset;
	unsigned int tfmsize;
};

struct crypto_instance {
	struct crypto_alg alg;

	struct crypto_template *tmpl;
	struct hlist_node list;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_template {
	struct list_head list;
	struct hlist_head instances;
	struct module *module;

	struct crypto_instance *(*alloc)(struct rtattr **tb);
	void (*free)(struct crypto_instance *inst);
	int (*create)(struct crypto_template *tmpl, struct rtattr **tb);

	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_spawn {
	struct list_head list;
	struct crypto_alg *alg;
	struct crypto_instance *inst;
	const struct crypto_type *frontend;
	u32 mask;
};

struct crypto_queue {
	struct list_head list;
	struct list_head *backlog;

	unsigned int qlen;
	unsigned int max_qlen;
};

struct scatter_walk {
	struct scatterlist *sg;
	unsigned int offset;
};

struct blkcipher_walk {
	union {
		struct {
			struct page *page;
			unsigned long offset;
		} phys;

		struct {
			u8 *page;
			u8 *addr;
		} virt;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;

	struct scatter_walk out;
	unsigned int total;

	void *page;
	u8 *buffer;
	u8 *iv;
	unsigned int ivsize;

	int flags;
	unsigned int walk_blocksize;
	unsigned int cipher_blocksize;
	unsigned int alignmask;
};

struct ablkcipher_walk {
	struct {
		struct page *page;
		unsigned int offset;
	} src, dst;

	struct scatter_walk in;
	unsigned int nbytes;
	struct scatter_walk out;
	unsigned int total;
	struct list_head buffers;
	u8 *iv_buffer;
	u8 *iv;
	int flags;
	unsigned int blocksize;
};

#define ENGINE_NAME_LEN	30
/*
 * struct crypto_engine - crypto hardware engine
 * @name: the engine name
 * @idling: the engine is entering the idle state
 * @busy: request pump is busy
 * @running: the engine is running
 * @cur_req_prepared: current request is prepared
 * @list: link to the global crypto engine list
 * @queue_lock: spinlock to synchronise access to the request queue
 * @queue: the crypto queue of the engine
 * @rt: whether this queue is set to run as a realtime task
 * @prepare_crypt_hardware: a request will soon arrive from the queue
 * so the subsystem requests the driver to prepare the hardware
 * by issuing this call
 * @unprepare_crypt_hardware: there are currently no more requests on the
 * queue so the subsystem notifies the driver that it may relax the
 * hardware by issuing this call
 * @prepare_request: do any preparation needed before handling the
 * current request
 * @unprepare_request: undo any work done by prepare_request()
 * @crypt_one_request: do the encryption for the current request
 * @kworker: thread struct for request pump
 * @kworker_task: pointer to the task for the request pump kworker thread
 * @pump_requests: work struct for scheduling work to the request pump
 * @priv_data: the engine private data
 * @cur_req: the request currently being processed
 */
struct crypto_engine {
	char			name[ENGINE_NAME_LEN];
	bool			idling;
	bool			busy;
	bool			running;
	bool			cur_req_prepared;

	struct list_head	list;
	spinlock_t		queue_lock;
	struct crypto_queue	queue;

	bool			rt;

	int (*prepare_crypt_hardware)(struct crypto_engine *engine);
	int (*unprepare_crypt_hardware)(struct crypto_engine *engine);

	int (*prepare_request)(struct crypto_engine *engine,
			       struct ablkcipher_request *req);
	int (*unprepare_request)(struct crypto_engine *engine,
				 struct ablkcipher_request *req);
	int (*crypt_one_request)(struct crypto_engine *engine,
				 struct ablkcipher_request *req);

	struct kthread_worker		kworker;
	struct task_struct		*kworker_task;
	struct kthread_work		pump_requests;

	void				*priv_data;
	struct ablkcipher_request	*cur_req;
};

int crypto_transfer_request(struct crypto_engine *engine,
			    struct ablkcipher_request *req, bool need_pump);
int crypto_transfer_request_to_engine(struct crypto_engine *engine,
				      struct ablkcipher_request *req);
void crypto_finalize_request(struct crypto_engine *engine,
			     struct ablkcipher_request *req, int err);
int crypto_engine_start(struct crypto_engine *engine);
int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
int crypto_engine_exit(struct crypto_engine *engine);
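
/*
 * Example (sketch, not part of the API): how a driver might hook into the
 * engine.  The names my_dev, my_probe and my_one_request, and the
 * surrounding driver glue, are hypothetical; error handling is abbreviated.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *dd = platform_get_drvdata(pdev);
 *
 *		dd->engine = crypto_engine_alloc_init(&pdev->dev, true);
 *		if (!dd->engine)
 *			return -ENOMEM;
 *		dd->engine->crypt_one_request = my_one_request;
 *		return crypto_engine_start(dd->engine);
 *	}
 *
 * The request path queues work with
 * crypto_transfer_request_to_engine(dd->engine, req); the driver's
 * crypt_one_request callback later reports completion via
 * crypto_finalize_request(dd->engine, req, err).  Teardown is
 * crypto_engine_stop() followed by crypto_engine_exit().
 */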

extern const struct crypto_type crypto_ablkcipher_type;
extern const struct crypto_type crypto_blkcipher_type;

void crypto_mod_put(struct crypto_alg *alg);

int crypto_register_template(struct crypto_template *tmpl);
void crypto_unregister_template(struct crypto_template *tmpl);
struct crypto_template *crypto_lookup_template(const char *name);

int crypto_register_instance(struct crypto_template *tmpl,
			     struct crypto_instance *inst);
int crypto_unregister_instance(struct crypto_instance *inst);

int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
		      struct crypto_instance *inst, u32 mask);
int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
		       struct crypto_instance *inst,
		       const struct crypto_type *frontend);
int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
		      u32 type, u32 mask);

void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
				    u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);

static inline void crypto_set_spawn(struct crypto_spawn *spawn,
				    struct crypto_instance *inst)
{
	spawn->inst = inst;
}

struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
				    const struct crypto_type *frontend,
				    u32 type, u32 mask);

static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
						 u32 type, u32 mask)
{
	return crypto_attr_alg2(rta, NULL, type, mask);
}

int crypto_attr_u32(struct rtattr *rta, u32 *num);
void *crypto_alloc_instance2(const char *name, struct crypto_alg *alg,
			     unsigned int head);
struct crypto_instance *crypto_alloc_instance(const char *name,
					      struct crypto_alg *alg);
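
/*
 * Example (sketch): the typical shape of a template's ->alloc() hook,
 * tying the attribute and instance helpers together.  The name my_alloc
 * is hypothetical and error handling is abbreviated; note that
 * crypto_alloc_instance() also initialises a crypto_spawn in the
 * instance context, so no separate crypto_init_spawn() call is needed.
 *
 *	static struct crypto_instance *my_alloc(struct rtattr **tb)
 *	{
 *		struct crypto_instance *inst;
 *		struct crypto_alg *alg;
 *		int err;
 *
 *		err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER);
 *		if (err)
 *			return ERR_PTR(err);
 *
 *		alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
 *					  CRYPTO_ALG_TYPE_MASK);
 *		if (IS_ERR(alg))
 *			return ERR_CAST(alg);
 *
 *		inst = crypto_alloc_instance("my-template", alg);
 *		...	fill in inst->alg fields	...
 *		crypto_mod_put(alg);
 *		return inst;
 *	}
 *
 * Registration from module init is then crypto_register_template(&my_tmpl),
 * paired with crypto_unregister_template() on exit.
 */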

void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
			   struct crypto_async_request *request);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);

static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
{
	return queue->qlen;
}
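
/*
 * Example (sketch): a driver-side pattern for feeding a crypto_queue.
 * The my_dev structure and its lock are hypothetical.
 *
 *	static int my_queue_req(struct my_dev *dd,
 *				struct crypto_async_request *req)
 *	{
 *		int ret;
 *
 *		spin_lock_bh(&dd->lock);
 *		ret = crypto_enqueue_request(&dd->queue, req);
 *		spin_unlock_bh(&dd->lock);
 *		return ret;
 *	}
 *
 * crypto_enqueue_request() returns -EINPROGRESS when queued, or -EBUSY
 * when the queue is full (backlogged only if the request set
 * CRYPTO_TFM_REQ_MAY_BACKLOG).  The dequeue side pops work with
 * crypto_dequeue_request() and, for a request returned by
 * crypto_get_backlog(), first invokes its completion callback with
 * -EINPROGRESS to signal that a backlog slot is free again.
 */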

/* These functions require the input/output to be aligned as u32. */
void crypto_inc(u8 *a, unsigned int size);
void crypto_xor(u8 *dst, const u8 *src, unsigned int size);
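
/*
 * Example (sketch): crypto_inc() treats the buffer as a big-endian
 * counter, as used for CTR-mode IVs, and crypto_xor() XORs src into dst
 * in place.  A hypothetical keystream XOR for one 16-byte block:
 *
 *	u8 ctr[16] __aligned(4) = { 0 };
 *
 *	crypto_xor(block, keystream, 16);	XOR keystream into block
 *	crypto_inc(ctr, 16);			last byte of ctr becomes 0x01
 */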

int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err);
int blkcipher_walk_virt(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk);
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize);
int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk,
				   struct crypto_aead *tfm,
				   unsigned int blocksize);
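
/*
 * Example (sketch): the canonical walk loop used by blkcipher
 * implementations.  my_crypt_segment is a hypothetical helper that
 * processes whole blocks from walk.src.virt.addr to walk.dst.virt.addr
 * and returns the number of bytes left unprocessed.
 *
 *	static int my_encrypt(struct blkcipher_desc *desc,
 *			      struct scatterlist *dst,
 *			      struct scatterlist *src, unsigned int nbytes)
 *	{
 *		struct blkcipher_walk walk;
 *		int err;
 *
 *		blkcipher_walk_init(&walk, dst, src, nbytes);
 *		err = blkcipher_walk_virt(desc, &walk);
 *
 *		while (walk.nbytes) {
 *			nbytes = my_crypt_segment(desc, &walk);
 *			err = blkcipher_walk_done(desc, &walk, nbytes);
 *		}
 *
 *		return err;
 *	}
 */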

int ablkcipher_walk_done(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk, int err);
int ablkcipher_walk_phys(struct ablkcipher_request *req,
			 struct ablkcipher_walk *walk);
void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);

static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
	return PTR_ALIGN(crypto_tfm_ctx(tfm),
			 crypto_tfm_alg_alignmask(tfm) + 1);
}

static inline struct crypto_instance *crypto_tfm_alg_instance(
	struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct crypto_instance, alg);
}

static inline void *crypto_instance_ctx(struct crypto_instance *inst)
{
	return inst->__ctx;
}

static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
}

static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx_aligned(&tfm->base);
}

static inline struct crypto_cipher *crypto_spawn_cipher(
	struct crypto_spawn *spawn)
{
	u32 type = CRYPTO_ALG_TYPE_CIPHER;
	u32 mask = CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
}

static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}

static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
				       struct scatterlist *dst,
				       struct scatterlist *src,
				       unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
}

static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
					struct scatterlist *dst,
					struct scatterlist *src,
					unsigned int nbytes)
{
	walk->in.sg = src;
	walk->out.sg = dst;
	walk->total = nbytes;
	INIT_LIST_HEAD(&walk->buffers);
}

static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
{
	if (unlikely(!list_empty(&walk->buffers)))
		__ablkcipher_walk_complete(walk);
}

static inline struct crypto_async_request *crypto_get_backlog(
	struct crypto_queue *queue)
{
	return queue->backlog == &queue->list ? NULL :
	       container_of(queue->backlog, struct crypto_async_request, list);
}

static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
					     struct ablkcipher_request *request)
{
	return crypto_enqueue_request(queue, &request->base);
}

static inline struct ablkcipher_request *ablkcipher_dequeue_request(
	struct crypto_queue *queue)
{
	return ablkcipher_request_cast(crypto_dequeue_request(queue));
}

static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
{
	return req->__ctx;
}

static inline int ablkcipher_tfm_in_queue(struct crypto_queue *queue,
					  struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_in_queue(queue, crypto_ablkcipher_tfm(tfm));
}

static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
						     u32 type, u32 mask)
{
	return crypto_attr_alg(tb[1], type, mask);
}

/*
 * Returns CRYPTO_ALG_ASYNC if type/mask requires the use of sync algorithms.
 * Otherwise returns zero.
 */
static inline int crypto_requires_sync(u32 type, u32 mask)
{
	return (type ^ CRYPTO_ALG_ASYNC) & mask & CRYPTO_ALG_ASYNC;
}
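
/*
 * For instance, a caller that cleared CRYPTO_ALG_ASYNC in type while
 * setting it in mask is asking for a synchronous implementation, so
 * crypto_requires_sync(0, CRYPTO_ALG_ASYNC) == CRYPTO_ALG_ASYNC,
 * whereas crypto_requires_sync(CRYPTO_ALG_ASYNC, CRYPTO_ALG_ASYNC) == 0.
 */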

noinline unsigned long __crypto_memneq(const void *a, const void *b, size_t size);

/**
 * crypto_memneq - Compare two areas of memory without leaking
 *		   timing information.
 *
 * @a: One area of memory
 * @b: Another area of memory
 * @size: The size of the area.
 *
 * Returns 0 when data is equal, 1 otherwise.
 */
static inline int crypto_memneq(const void *a, const void *b, size_t size)
{
	return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
}
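
/*
 * Example (sketch): comparing a computed MAC against the one received
 * from the wire.  Using crypto_memneq() instead of memcmp() keeps the
 * comparison time independent of where the first mismatch occurs.
 *
 *	if (crypto_memneq(computed_tag, received_tag, taglen))
 *		return -EBADMSG;
 */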

static inline void crypto_yield(u32 flags)
{
	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		cond_resched();
}

#endif	/* _CRYPTO_ALGAPI_H */