/*
 * Asynchronous Cryptographic Hash operations.
 *
 * This is the asynchronous version of hash.c with notification of
 * completion via a callback.
 *
 * Copyright (c) 2008 Loc Ho <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cryptouser.h>
#include <net/netlink.h>

#include "internal.h"

struct ahash_request_priv {
	crypto_completion_t complete;
	void *data;
	u8 *result;
	void *ubuf[] CRYPTO_MINALIGN_ATTR;
};

static inline struct ahash_alg *crypto_ahash_alg(struct crypto_ahash *hash)
{
	return container_of(crypto_hash_alg_common(hash), struct ahash_alg,
			    halg);
}

static int hash_walk_next(struct crypto_hash_walk *walk)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int offset = walk->offset;
	unsigned int nbytes = min(walk->entrylen,
				  ((unsigned int)(PAGE_SIZE)) - offset);

	if (walk->flags & CRYPTO_ALG_ASYNC)
		walk->data = kmap(walk->pg);
	else
		walk->data = kmap_atomic(walk->pg);
	walk->data += offset;

	if (offset & alignmask) {
		unsigned int unaligned = alignmask + 1 - (offset & alignmask);

		if (nbytes > unaligned)
			nbytes = unaligned;
	}

	walk->entrylen -= nbytes;
	return nbytes;
}

static int hash_walk_new_entry(struct crypto_hash_walk *walk)
{
	struct scatterlist *sg;

	sg = walk->sg;
	walk->pg = sg_page(sg);
	walk->offset = sg->offset;
	walk->entrylen = sg->length;

	if (walk->entrylen > walk->total)
		walk->entrylen = walk->total;
	walk->total -= walk->entrylen;

	return hash_walk_next(walk);
}

int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
{
	unsigned int alignmask = walk->alignmask;
	unsigned int nbytes = walk->entrylen;

	walk->data -= walk->offset;

	if (nbytes && walk->offset & alignmask && !err) {
		walk->offset = ALIGN(walk->offset, alignmask + 1);
		walk->data += walk->offset;

		nbytes = min(nbytes,
			     ((unsigned int)(PAGE_SIZE)) - walk->offset);
		walk->entrylen -= nbytes;

		return nbytes;
	}

	if (walk->flags & CRYPTO_ALG_ASYNC)
		kunmap(walk->pg);
	else {
		kunmap_atomic(walk->data);
		/*
		 * The may sleep test only makes sense for sync users.
		 * Async users don't need to sleep here anyway.
		 */
		crypto_yield(walk->flags);
	}

	if (err)
		return err;

	if (nbytes) {
		walk->offset = 0;
		walk->pg++;
		return hash_walk_next(walk);
	}

	if (!walk->total)
		return 0;

	walk->sg = sg_next(walk->sg);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_done);

int crypto_hash_walk_first(struct ahash_request *req,
			   struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
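
/*
 * Usage sketch (added for illustration; not part of the upstream file):
 * a driver's ->update() implementation typically consumes the request's
 * scatterlist through the walk helpers above, processing one mapped
 * chunk per iteration. Only the crypto_hash_walk_* calls are real API
 * here; drv_ahash_update() and drv_hash_blocks() are hypothetical names,
 * and drv_hash_blocks() is assumed to return 0 on success or a negative
 * errno, which is then fed back into crypto_hash_walk_done().
 *
 *	static int drv_ahash_update(struct ahash_request *req)
 *	{
 *		struct crypto_hash_walk walk;
 *		int nbytes;
 *
 *		for (nbytes = crypto_hash_walk_first(req, &walk);
 *		     nbytes > 0;
 *		     nbytes = crypto_hash_walk_done(&walk, nbytes))
 *			nbytes = drv_hash_blocks(req, walk.data, nbytes);
 *
 *		return nbytes;
 *	}
 */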

int crypto_ahash_walk_first(struct ahash_request *req,
			    struct crypto_hash_walk *walk)
{
	walk->total = req->nbytes;

	if (!walk->total) {
		walk->entrylen = 0;
		return 0;
	}

	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
	walk->sg = req->src;
	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
	walk->flags |= CRYPTO_ALG_ASYNC;

	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);

	return hash_walk_new_entry(walk);
}
EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);

static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
				  unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	int ret;
	u8 *buffer, *alignbuffer;
	unsigned long absize;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = tfm->setkey(tfm, alignbuffer, keylen);
	kzfree(buffer);
	return ret;
}

int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)key & alignmask)
		return ahash_setkey_unaligned(tfm, key, keylen);

	return tfm->setkey(tfm, key, keylen);
}
EXPORT_SYMBOL_GPL(crypto_ahash_setkey);

static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
			  unsigned int keylen)
{
	return -ENOSYS;
}

static inline unsigned int ahash_align_buffer_size(unsigned len,
						   unsigned long mask)
{
	return len + (mask & ~(crypto_tfm_ctx_alignment() - 1));
}

static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);
	unsigned int ds = crypto_ahash_digestsize(tfm);
	struct ahash_request_priv *priv;

	priv = kmalloc(sizeof(*priv) + ahash_align_buffer_size(ds, alignmask),
		       (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		       GFP_KERNEL : GFP_ATOMIC);
	if (!priv)
		return -ENOMEM;

	/*
	 * WARNING: Voodoo programming below!
	 *
	 * The code below is obscure and hard to understand, thus explanation
	 * is necessary. See include/crypto/hash.h and include/linux/crypto.h
	 * to understand the layout of structures used here!
	 *
	 * The code here will replace portions of the ORIGINAL request with
	 * pointers to new code and buffers so the hashing operation can store
	 * the result in aligned buffer. We will call the modified request
	 * an ADJUSTED request.
	 *
	 * The newly mangled request will look as such:
	 *
	 * req {
	 *   .result        = ADJUSTED[new aligned buffer]
	 *   .base.complete = ADJUSTED[pointer to completion function]
	 *   .base.data     = ADJUSTED[*req (pointer to self)]
	 *   .priv          = ADJUSTED[new priv] {
	 *           .result   = ORIGINAL(result)
	 *           .complete = ORIGINAL(base.complete)
	 *           .data     = ORIGINAL(base.data)
	 *   }
	 */

	priv->result = req->result;
	priv->complete = req->base.complete;
	priv->data = req->base.data;
	/*
	 * WARNING: We do not backup req->priv here! The req->priv
	 *          is for internal use of the Crypto API and the
	 *          user must _NOT_ _EVER_ depend on it's content!
	 */

	req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1);
	req->base.complete = cplt;
	req->base.data = req;
	req->priv = priv;

	return 0;
}

static void ahash_restore_req(struct ahash_request *req)
{
	struct ahash_request_priv *priv = req->priv;

	/* Restore the original crypto request. */
	req->result = priv->result;
	req->base.complete = priv->complete;
	req->base.data = priv->data;
	req->priv = NULL;

	/* Free the req->priv.priv from the ADJUSTED request. */
	kzfree(priv);
}

static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	/*
	 * Restore the original request, see ahash_op_unaligned() for what
	 * goes where.
	 *
	 * The "struct ahash_request *req" here is in fact the "req.base"
	 * from the ADJUSTED request from ahash_op_unaligned(), thus as it
	 * is a pointer to self, it is also the ADJUSTED "req" .
	 */

	/* First copy req->result into req->priv.result */
	ahash_op_unaligned_finish(areq, err);

	/* Complete the ORIGINAL request. */
	areq->base.complete(&areq->base, err);
}

static int ahash_op_unaligned(struct ahash_request *req,
			      int (*op)(struct ahash_request *))
{
	int err;

	err = ahash_save_req(req, ahash_op_unaligned_done);
	if (err)
		return err;

	err = op(req);
	ahash_op_unaligned_finish(req, err);

	return err;
}

static int crypto_ahash_op(struct ahash_request *req,
			   int (*op)(struct ahash_request *))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	unsigned long alignmask = crypto_ahash_alignmask(tfm);

	if ((unsigned long)req->result & alignmask)
		return ahash_op_unaligned(req, op);

	return op(req);
}

int crypto_ahash_final(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final);
}
EXPORT_SYMBOL_GPL(crypto_ahash_final);

int crypto_ahash_finup(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup);
}
EXPORT_SYMBOL_GPL(crypto_ahash_finup);

int crypto_ahash_digest(struct ahash_request *req)
{
	return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest);
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);

static void ahash_def_finup_finish2(struct ahash_request *req, int err)
{
	struct ahash_request_priv *priv = req->priv;

	if (err == -EINPROGRESS)
		return;

	if (!err)
		memcpy(priv->result, req->result,
		       crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));

	ahash_restore_req(req);
}

static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	ahash_def_finup_finish2(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup_finish1(struct ahash_request *req, int err)
{
	if (err)
		goto out;

	req->base.complete = ahash_def_finup_done2;
	req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
	err = crypto_ahash_reqtfm(req)->final(req);

out:
	ahash_def_finup_finish2(req, err);
	return err;
}

static void ahash_def_finup_done1(struct crypto_async_request *req, int err)
{
	struct ahash_request *areq = req->data;

	err = ahash_def_finup_finish1(areq, err);

	areq->base.complete(&areq->base, err);
}

static int ahash_def_finup(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	int err;

	err = ahash_save_req(req, ahash_def_finup_done1);
	if (err)
		return err;

	err = tfm->update(req);
	return ahash_def_finup_finish1(req, err);
}

static int ahash_no_export(struct ahash_request *req, void *out)
{
	return -ENOSYS;
}

static int ahash_no_import(struct ahash_request *req, const void *in)
{
	return -ENOSYS;
}

static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	hash->setkey = ahash_nosetkey;
	hash->has_setkey = false;
	hash->export = ahash_no_export;
	hash->import = ahash_no_import;

	if (tfm->__crt_alg->cra_type != &crypto_ahash_type)
		return crypto_init_shash_ops_async(tfm);

	hash->init = alg->init;
	hash->update = alg->update;
	hash->final = alg->final;
	hash->finup = alg->finup ?: ahash_def_finup;
	hash->digest = alg->digest;

	if (alg->setkey) {
		hash->setkey = alg->setkey;
		hash->has_setkey = true;
	}
	if (alg->export)
		hash->export = alg->export;
	if (alg->import)
		hash->import = alg->import;

	return 0;
}

static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_ahash_type)
		return alg->cra_ctxsize;

	return sizeof(struct crypto_shash *);
}

#ifdef CONFIG_NET
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	strncpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	if (nla_put(skb, CRYPTOCFGA_REPORT_HASH,
		    sizeof(struct crypto_report_hash), &rhash))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
#else
static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
{
	return -ENOSYS;
}
#endif

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__attribute__ ((unused));
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type         : ahash\n");
	seq_printf(m, "async        : %s\n", alg->cra_flags & CRYPTO_ALG_ASYNC ?
					     "yes" : "no");
	seq_printf(m, "blocksize    : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize   : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
	.report = crypto_ahash_report,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};
EXPORT_SYMBOL_GPL(crypto_ahash_type);

struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);
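
/*
 * Usage sketch (added for illustration; not part of the upstream file):
 * a caller allocates a transform with crypto_alloc_ahash(), attaches a
 * completion callback to the request, and waits if the operation runs
 * asynchronously. Assumes <crypto/hash.h>, <linux/scatterlist.h> and
 * <linux/completion.h>; struct sample_result, sample_complete() and
 * sample_sha256() are hypothetical names, and "data" is assumed not to
 * live on the stack since it may be handed to DMA by a driver.
 *
 *	struct sample_result {
 *		struct completion done;
 *		int err;
 *	};
 *
 *	static void sample_complete(struct crypto_async_request *areq, int err)
 *	{
 *		struct sample_result *res = areq->data;
 *
 *		if (err == -EINPROGRESS)
 *			return;
 *		res->err = err;
 *		complete(&res->done);
 *	}
 *
 *	static int sample_sha256(const u8 *data, unsigned int len, u8 *out)
 *	{
 *		struct crypto_ahash *tfm;
 *		struct ahash_request *req;
 *		struct scatterlist sg;
 *		struct sample_result res;
 *		int err;
 *
 *		tfm = crypto_alloc_ahash("sha256", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		req = ahash_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			err = -ENOMEM;
 *			goto out_tfm;
 *		}
 *
 *		init_completion(&res.done);
 *		sg_init_one(&sg, data, len);
 *		ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					   sample_complete, &res);
 *		ahash_request_set_crypt(req, &sg, out, len);
 *
 *		err = crypto_ahash_digest(req);
 *		if (err == -EINPROGRESS || err == -EBUSY) {
 *			wait_for_completion(&res.done);
 *			err = res.err;
 *		}
 *
 *		ahash_request_free(req);
 *	out_tfm:
 *		crypto_free_ahash(tfm);
 *		return err;
 *	}
 */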

int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;

	if (alg->halg.digestsize > PAGE_SIZE / 8 ||
	    alg->halg.statesize > PAGE_SIZE / 8 ||
	    alg->halg.statesize == 0)
		return -EINVAL;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	return 0;
}

int crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);
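
/*
 * Registration sketch (added for illustration; not part of the upstream
 * file): a driver describes its implementation in a struct ahash_alg and
 * hands it to crypto_register_ahash(). Note that ahash_prepare_alg()
 * above rejects a zero statesize, so .statesize must be filled in. All
 * sample_* names and the sizes shown here are hypothetical.
 *
 *	static struct ahash_alg sample_alg = {
 *		.init	= sample_init,
 *		.update	= sample_update,
 *		.final	= sample_final,
 *		.digest	= sample_digest,
 *		.halg	= {
 *			.digestsize = 32,
 *			.statesize  = sizeof(struct sample_state),
 *			.base	    = {
 *				.cra_name	 = "sha256",
 *				.cra_driver_name = "sha256-sample",
 *				.cra_priority	 = 300,
 *				.cra_flags	 = CRYPTO_ALG_ASYNC,
 *				.cra_blocksize	 = 64,
 *				.cra_ctxsize	 = sizeof(struct sample_ctx),
 *				.cra_module	 = THIS_MODULE,
 *			},
 *		},
 *	};
 *
 *	err = crypto_register_ahash(&sample_alg);
 *	...
 *	crypto_unregister_ahash(&sample_alg);
 */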

int crypto_unregister_ahash(struct ahash_alg *alg)
{
	return crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

void ahash_free_instance(struct crypto_instance *inst)
{
	crypto_drop_spawn(crypto_instance_ctx(inst));
	kfree(ahash_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_free_instance);

int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
			    struct hash_alg_common *alg,
			    struct crypto_instance *inst)
{
	return crypto_init_spawn2(&spawn->base, &alg->base, inst,
				  &crypto_ahash_type);
}
EXPORT_SYMBOL_GPL(crypto_init_ahash_spawn);

struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask)
{
	struct crypto_alg *alg;

	alg = crypto_attr_alg2(rta, &crypto_ahash_type, type, mask);
	return IS_ERR(alg) ? ERR_CAST(alg) : __crypto_hash_alg_common(alg);
}
EXPORT_SYMBOL_GPL(ahash_attr_alg);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");