/*
 * Scatterlist Cryptographic API.
 *
 * Copyright (c) 2002 James Morris <[email protected]>
 * Copyright (c) 2002 David S. Miller ([email protected])
 * Copyright (c) 2005 Herbert Xu <[email protected]>
 *
 * Portions derived from Cryptoapi, by Alexander Kjeldaas <[email protected]>
 * and Nettle, by Niels Möller.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/*
 * Autoloaded crypto modules should only use a prefixed name to avoid allowing
 * arbitrary modules to be loaded. Loading from userspace may still need the
 * unprefixed names, so this retains those aliases as well.
 * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
 * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
 * expands twice on the same line. Instead, use a separate base name for the
 * alias.
 */
#define MODULE_ALIAS_CRYPTO(name)	\
		__MODULE_INFO(alias, alias_userspace, name);	\
		__MODULE_INFO(alias, alias_crypto, "crypto-" name)

/*
 * Algorithm masks and types.
 */
#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_CIPHER		0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS	0x00000002
#define CRYPTO_ALG_TYPE_AEAD		0x00000003
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_ABLKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER	0x00000005
#define CRYPTO_ALG_TYPE_GIVCIPHER	0x00000006
#define CRYPTO_ALG_TYPE_KPP		0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS	0x0000000a
#define CRYPTO_ALG_TYPE_SCOMPRESS	0x0000000b
#define CRYPTO_ALG_TYPE_RNG		0x0000000c
#define CRYPTO_ALG_TYPE_AKCIPHER	0x0000000d
#define CRYPTO_ALG_TYPE_DIGEST		0x0000000e
#define CRYPTO_ALG_TYPE_HASH		0x0000000e
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_AHASH		0x0000000f

#define CRYPTO_ALG_TYPE_HASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK	0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK	0x0000000e

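/*
 * A sketch of how type and mask interact (the actual check lives in the core
 * lookup code, not in this header): a registered algorithm matches a
 * (type, mask) pair requested by a consumer roughly when
 *
 *	((alg->cra_flags ^ type) & mask) == 0
 *
 * i.e. the mask selects which bits of the requested type must agree. A mask
 * such as CRYPTO_ALG_TYPE_BLKCIPHER_MASK (0xc) deliberately ignores the low
 * two bits, so blkcipher (0x4), ablkcipher (0x5) and givcipher (0x6)
 * implementations can all satisfy a generic skcipher request.
 */
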
#define CRYPTO_ALG_LARVAL		0x00000010
#define CRYPTO_ALG_DEAD			0x00000020
#define CRYPTO_ALG_DYING		0x00000040
#define CRYPTO_ALG_ASYNC		0x00000080

/*
 * Set this bit if and only if the algorithm requires another algorithm of
 * the same type to handle corner cases.
 */
#define CRYPTO_ALG_NEED_FALLBACK	0x00000100

/*
 * This bit is set for symmetric key ciphers that have already been wrapped
 * with a generic IV generator to prevent them from being wrapped again.
 */
#define CRYPTO_ALG_GENIV		0x00000200

/*
 * Set if the algorithm has passed automated run-time testing. Note that
 * if there is no run-time testing for a given algorithm it is considered
 * to have passed.
 */

#define CRYPTO_ALG_TESTED		0x00000400

/*
 * Set if the algorithm is an instance that is built from templates.
 */
#define CRYPTO_ALG_INSTANCE		0x00000800

/* Set this bit if the algorithm provided is hardware accelerated but
 * not available to userspace via an instruction set or similar.
 */
#define CRYPTO_ALG_KERN_DRIVER_ONLY	0x00001000

/*
 * Mark a cipher as a service implementation only usable by another
 * cipher and never by a normal user of the kernel crypto API.
 */
#define CRYPTO_ALG_INTERNAL		0x00002000

/*
 * Transform masks and values (for crt_flags).
 */
#define CRYPTO_TFM_REQ_MASK		0x000fff00
#define CRYPTO_TFM_RES_MASK		0xfff00000

#define CRYPTO_TFM_REQ_WEAK_KEY		0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP	0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG	0x00000400
#define CRYPTO_TFM_RES_WEAK_KEY		0x00100000
#define CRYPTO_TFM_RES_BAD_KEY_LEN	0x00200000
#define CRYPTO_TFM_RES_BAD_KEY_SCHED	0x00400000
#define CRYPTO_TFM_RES_BAD_BLOCK_LEN	0x00800000
#define CRYPTO_TFM_RES_BAD_FLAGS	0x01000000

/*
 * Miscellaneous stuff.
 */
#define CRYPTO_MAX_ALG_NAME		64

/*
 * The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
 * declaration) is used to ensure that the crypto_tfm context structure is
 * aligned correctly for the given architecture so that there are no alignment
 * faults for C data types. In particular, this is required on platforms such
 * as arm where pointers are 32-bit aligned but there are data types such as
 * u64 which require 64-bit alignment.
 */
#define CRYPTO_MINALIGN ARCH_KMALLOC_MINALIGN

#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))

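/*
 * Illustrative sketch (hypothetical driver-private type, not a real one):
 * because the __crt_ctx member of struct crypto_tfm below is declared with
 * CRYPTO_MINALIGN_ATTR, a context obtained through crypto_tfm_ctx() may
 * safely contain members with strict alignment requirements:
 *
 *	struct my_cipher_ctx {
 *		u64 counter;	safe even where pointers are 32-bit aligned
 *		u8 key[32];
 *	};
 *
 *	struct my_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
 */
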
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
struct skcipher_givcrypt_request;

typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);

/**
 * DOC: Block Cipher Context Data Structures
 *
 * These data structures define the operating context for each block cipher
 * type.
 */

struct crypto_async_request {
	struct list_head list;
	crypto_completion_t complete;
	void *data;
	struct crypto_tfm *tfm;

	u32 flags;
};

struct ablkcipher_request {
	struct crypto_async_request base;

	unsigned int nbytes;

	void *info;

	struct scatterlist *src;
	struct scatterlist *dst;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

struct blkcipher_desc {
	struct crypto_blkcipher *tfm;
	void *info;
	u32 flags;
};

struct cipher_desc {
	struct crypto_tfm *tfm;
	void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst,
			     const u8 *src, unsigned int nbytes);
	void *info;
};

/**
 * DOC: Block Cipher Algorithm Definitions
 *
 * These data structures define modular crypto algorithm implementations,
 * managed via crypto_register_alg() and crypto_unregister_alg().
 */

/**
 * struct ablkcipher_alg - asynchronous block cipher definition
 * @min_keysize: Minimum key size supported by the transformation. This is the
 *		 smallest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MIN_KEY_SIZE" include/crypto/
 * @max_keysize: Maximum key size supported by the transformation. This is the
 *		 largest key length supported by this transformation algorithm.
 *		 This must be set to one of the pre-defined values as this is
 *		 not hardware specific. Possible values for this field can be
 *		 found via git grep "_MAX_KEY_SIZE" include/crypto/
 * @setkey: Set key for the transformation. This function is used to either
 *	    program a supplied key into the hardware or store the key in the
 *	    transformation context for programming it later. Note that this
 *	    function does modify the transformation context. This function can
 *	    be called multiple times during the existence of the transformation
 *	    object, so one must make sure the key is properly reprogrammed into
 *	    the hardware. This function is also responsible for checking the key
 *	    length for validity. In case a software fallback was put in place in
 *	    the @cra_init call, this function might need to use the fallback if
 *	    the algorithm doesn't support all of the key sizes.
 * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
 *	     the supplied scatterlist containing the blocks of data. The crypto
 *	     API consumer is responsible for aligning the entries of the
 *	     scatterlist properly and making sure the chunks are correctly
 *	     sized. In case a software fallback was put in place in the
 *	     @cra_init call, this function might need to use the fallback if
 *	     the algorithm doesn't support all of the key sizes. In case the
 *	     key was stored in transformation context, the key might need to be
 *	     re-programmed into the hardware in this function. This function
 *	     shall not modify the transformation context, as this function may
 *	     be called in parallel with the same transformation object.
 * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
 *	     and the conditions are exactly the same.
 * @givencrypt: Update the IV for encryption. With this function, a cipher
 *		implementation may provide the function on how to update the IV
 *		for encryption.
 * @givdecrypt: Update the IV for decryption. This is the reverse of
 *		@givencrypt.
 * @geniv: The transformation implementation may use an "IV generator" provided
 *	   by the kernel crypto API. Several use cases have a predefined
 *	   approach to how IVs are to be updated. For such use cases, the kernel
 *	   crypto API provides ready-to-use implementations that can be
 *	   referenced with this variable.
 * @ivsize: IV size applicable for transformation. The consumer must provide an
 *	    IV of exactly that size to perform the encrypt or decrypt operation.
 *
 * All fields except @givencrypt, @givdecrypt, @geniv and @ivsize are
 * mandatory and must be filled.
 */
struct ablkcipher_alg {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);
	int (*givencrypt)(struct skcipher_givcrypt_request *req);
	int (*givdecrypt)(struct skcipher_givcrypt_request *req);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct blkcipher_alg - synchronous block cipher definition
 * @min_keysize: see struct ablkcipher_alg
 * @max_keysize: see struct ablkcipher_alg
 * @setkey: see struct ablkcipher_alg
 * @encrypt: see struct ablkcipher_alg
 * @decrypt: see struct ablkcipher_alg
 * @geniv: see struct ablkcipher_alg
 * @ivsize: see struct ablkcipher_alg
 *
 * All fields except @geniv and @ivsize are mandatory and must be filled.
 */
struct blkcipher_alg {
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned int nbytes);

	const char *geniv;

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
};

/**
 * struct cipher_alg - single-block symmetric ciphers definition
 * @cia_min_keysize: Minimum key size supported by the transformation. This is
 *		     the smallest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined
 *		     values as this is not hardware specific. Possible values
 *		     for this field can be found via git grep "_MIN_KEY_SIZE"
 *		     include/crypto/
 * @cia_max_keysize: Maximum key size supported by the transformation. This is
 *		     the largest key length supported by this transformation
 *		     algorithm. This must be set to one of the pre-defined values
 *		     as this is not hardware specific. Possible values for this
 *		     field can be found via git grep "_MAX_KEY_SIZE"
 *		     include/crypto/
 * @cia_setkey: Set key for the transformation. This function is used to either
 *		program a supplied key into the hardware or store the key in the
 *		transformation context for programming it later. Note that this
 *		function does modify the transformation context. This function
 *		can be called multiple times during the existence of the
 *		transformation object, so one must make sure the key is properly
 *		reprogrammed into the hardware. This function is also
 *		responsible for checking the key length for validity.
 * @cia_encrypt: Encrypt a single block. This function is used to encrypt a
 *		 single block of data, which must be @cra_blocksize big. This
 *		 always operates on a full @cra_blocksize and it is not possible
 *		 to encrypt a block of smaller size. The supplied buffers must
 *		 therefore also be at least of @cra_blocksize size. Both the
 *		 input and output buffers are always aligned to @cra_alignmask.
 *		 In case either of the input or output buffer supplied by user
 *		 of the crypto API is not aligned to @cra_alignmask, the crypto
 *		 API will re-align the buffers. The re-alignment means that a
 *		 new buffer will be allocated, the data will be copied into the
 *		 new buffer, then the processing will happen on the new buffer,
 *		 then the data will be copied back into the original buffer and
 *		 finally the new buffer will be freed. In case a software
 *		 fallback was put in place in the @cra_init call, this function
 *		 might need to use the fallback if the algorithm doesn't support
 *		 all of the key sizes. In case the key was stored in
 *		 transformation context, the key might need to be re-programmed
 *		 into the hardware in this function. This function shall not
 *		 modify the transformation context, as this function may be
 *		 called in parallel with the same transformation object.
 * @cia_decrypt: Decrypt a single block. This is a reverse counterpart to
 *		 @cia_encrypt, and the conditions are exactly the same.
 *
 * All fields are mandatory and must be filled.
 */
struct cipher_alg {
	unsigned int cia_min_keysize;
	unsigned int cia_max_keysize;
	int (*cia_setkey)(struct crypto_tfm *tfm, const u8 *key,
			  unsigned int keylen);
	void (*cia_encrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cia_decrypt)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_alg {
	int (*coa_compress)(struct crypto_tfm *tfm, const u8 *src,
			    unsigned int slen, u8 *dst, unsigned int *dlen);
	int (*coa_decompress)(struct crypto_tfm *tfm, const u8 *src,
			      unsigned int slen, u8 *dst, unsigned int *dlen);
};


#define cra_ablkcipher	cra_u.ablkcipher
#define cra_blkcipher	cra_u.blkcipher
#define cra_cipher	cra_u.cipher
#define cra_compress	cra_u.compress

/**
 * struct crypto_alg - definition of a cryptographic cipher algorithm
 * @cra_flags: Flags describing this transformation. See include/linux/crypto.h
 *	       CRYPTO_ALG_* flags for the flags which go in here. Those are
 *	       used for fine-tuning the description of the transformation
 *	       algorithm.
 * @cra_blocksize: Minimum block size of this transformation. The size in bytes
 *		   of the smallest possible unit which can be transformed with
 *		   this algorithm. The users must respect this value.
 *		   In case of HASH transformation, it is possible for a smaller
 *		   block than @cra_blocksize to be passed to the crypto API for
 *		   transformation, in case of any other transformation type, an
 *		   error will be returned upon any attempt to transform smaller
 *		   than @cra_blocksize chunks.
 * @cra_ctxsize: Size of the operational context of the transformation. This
 *		 value informs the kernel crypto API about the memory size
 *		 needed to be allocated for the transformation context.
 * @cra_alignmask: Alignment mask for the input and output data buffer. The data
 *		   buffer containing the input data for the algorithm must be
 *		   aligned to this alignment mask. The data buffer for the
 *		   output data must be aligned to this alignment mask. Note that
 *		   the Crypto API will do the re-alignment in software, but
 *		   only under special conditions and there is a performance hit.
 *		   The re-alignment happens at these occasions for different
 *		   @cra_u types: cipher -- For both input data and output data
 *		   buffer; ahash -- For output hash destination buf; shash --
 *		   For output hash destination buf.
 *		   This is needed on hardware which is flawed by design and
 *		   cannot pick data from arbitrary addresses.
 * @cra_priority: Priority of this transformation implementation. In case
 *		  multiple transformations with same @cra_name are available to
 *		  the Crypto API, the kernel will use the one with highest
 *		  @cra_priority.
 * @cra_name: Generic name (usable by multiple implementations) of the
 *	      transformation algorithm. This is the name of the transformation
 *	      itself. This field is used by the kernel when looking up the
 *	      providers of particular transformation.
 * @cra_driver_name: Unique name of the transformation provider. This is the
 *		     name of the provider of the transformation. This can be any
 *		     arbitrary value, but in the usual case, this contains the
 *		     name of the chip or provider and the name of the
 *		     transformation algorithm.
 * @cra_type: Type of the cryptographic transformation. This is a pointer to
 *	      struct crypto_type, which implements callbacks common for all
 *	      transformation types. There are multiple options:
 *	      &crypto_blkcipher_type, &crypto_ablkcipher_type,
 *	      &crypto_ahash_type, &crypto_rng_type.
 *	      This field might be empty. In that case, there are no common
 *	      callbacks. This is the case for: cipher, compress, shash.
 * @cra_u: Callbacks implementing the transformation. This is a union of
 *	   multiple structures. Depending on the type of transformation selected
 *	   by @cra_type and @cra_flags above, the associated structure must be
 *	   filled with callbacks. This field might be empty. This is the case
 *	   for ahash, shash.
 * @cra_init: Initialize the cryptographic transformation object. This function
 *	      is used to initialize the cryptographic transformation object.
 *	      This function is called only once at the instantiation time, right
 *	      after the transformation context was allocated. In case the
 *	      cryptographic hardware has some special requirements which need to
 *	      be handled by software, this function shall check for the precise
 *	      requirement of the transformation and put any software fallbacks
 *	      in place.
 * @cra_exit: Deinitialize the cryptographic transformation object. This is a
 *	      counterpart to @cra_init, used to remove various changes set in
 *	      @cra_init.
 * @cra_module: Owner of this transformation implementation. Set to THIS_MODULE
 * @cra_list: internally used
 * @cra_users: internally used
 * @cra_refcnt: internally used
 * @cra_destroy: internally used
 *
 * The struct crypto_alg describes a generic Crypto API algorithm and is common
 * for all of the transformations. Any variable not documented here shall not
 * be used by a cipher implementation as it is internal to the Crypto API.
 */
struct crypto_alg {
	struct list_head cra_list;
	struct list_head cra_users;

	u32 cra_flags;
	unsigned int cra_blocksize;
	unsigned int cra_ctxsize;
	unsigned int cra_alignmask;

	int cra_priority;
	atomic_t cra_refcnt;

	char cra_name[CRYPTO_MAX_ALG_NAME];
	char cra_driver_name[CRYPTO_MAX_ALG_NAME];

	const struct crypto_type *cra_type;

	union {
		struct ablkcipher_alg ablkcipher;
		struct blkcipher_alg blkcipher;
		struct cipher_alg cipher;
		struct compress_alg compress;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void (*cra_destroy)(struct crypto_alg *alg);

	struct module *cra_module;
} CRYPTO_MINALIGN_ATTR;

/*
 * Algorithm registration interface.
 */
int crypto_register_alg(struct crypto_alg *alg);
int crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
int crypto_unregister_algs(struct crypto_alg *algs, int count);
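
/*
 * A minimal registration sketch (all names are hypothetical; a real driver
 * will have additional fields and error handling):
 *
 *	static struct crypto_alg my_alg = {
 *		.cra_name		= "cbc(aes)",
 *		.cra_driver_name	= "cbc-aes-mydriver",
 *		.cra_priority		= 300,
 *		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
 *					  CRYPTO_ALG_ASYNC,
 *		.cra_blocksize		= 16,
 *		.cra_ctxsize		= sizeof(struct my_cipher_ctx),
 *		.cra_type		= &crypto_ablkcipher_type,
 *		.cra_module		= THIS_MODULE,
 *		.cra_ablkcipher		= { ... as sketched above ... },
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return crypto_register_alg(&my_alg);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		crypto_unregister_alg(&my_alg);
 *	}
 */
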
/*
 * Algorithm query interface.
 */
int crypto_has_alg(const char *name, u32 type, u32 mask);

/*
 * Transforms: user-instantiated objects which encapsulate algorithms
 * and core processing logic. Managed via crypto_alloc_*() and
 * crypto_free_*(), as well as the various helpers below.
 */

struct ablkcipher_tfm {
	int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct ablkcipher_request *req);
	int (*decrypt)(struct ablkcipher_request *req);

	struct crypto_ablkcipher *base;

	unsigned int ivsize;
	unsigned int reqsize;
};

struct blkcipher_tfm {
	void *iv;
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned int nbytes);
};

struct cipher_tfm {
	int (*cit_setkey)(struct crypto_tfm *tfm,
			  const u8 *key, unsigned int keylen);
	void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
	void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
};

struct compress_tfm {
	int (*cot_compress)(struct crypto_tfm *tfm,
			    const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int *dlen);
	int (*cot_decompress)(struct crypto_tfm *tfm,
			      const u8 *src, unsigned int slen,
			      u8 *dst, unsigned int *dlen);
};

#define crt_ablkcipher	crt_u.ablkcipher
#define crt_blkcipher	crt_u.blkcipher
#define crt_cipher	crt_u.cipher
#define crt_compress	crt_u.compress

struct crypto_tfm {

	u32 crt_flags;

	union {
		struct ablkcipher_tfm ablkcipher;
		struct blkcipher_tfm blkcipher;
		struct cipher_tfm cipher;
		struct compress_tfm compress;
	} crt_u;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;

	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};

struct crypto_ablkcipher {
	struct crypto_tfm base;
};

struct crypto_blkcipher {
	struct crypto_tfm base;
};

struct crypto_cipher {
	struct crypto_tfm base;
};

struct crypto_comp {
	struct crypto_tfm base;
};

enum {
	CRYPTOA_UNSPEC,
	CRYPTOA_ALG,
	CRYPTOA_TYPE,
	CRYPTOA_U32,
	__CRYPTOA_MAX,
};

#define CRYPTOA_MAX (__CRYPTOA_MAX - 1)

/* Maximum number of (rtattr) parameters for each template. */
#define CRYPTO_MAX_ATTRS 32

struct crypto_attr_alg {
	char name[CRYPTO_MAX_ALG_NAME];
};

struct crypto_attr_type {
	u32 type;
	u32 mask;
};

struct crypto_attr_u32 {
	u32 num;
};

/*
 * Transform user interface.
 */

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

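/*
 * Illustrative usage sketch (the algorithm name is only an example):
 *
 *	struct crypto_tfm *tfm;
 *
 *	tfm = crypto_alloc_base("aes", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	...
 *	crypto_free_tfm(tfm);
 */
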
int alg_test(const char *driver, const char *alg, u32 type, u32 mask);

/*
 * Transform helpers which query the underlying algorithm.
 */
static inline const char *crypto_tfm_alg_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_name;
}

static inline const char *crypto_tfm_alg_driver_name(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_driver_name;
}

static inline int crypto_tfm_alg_priority(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_priority;
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline unsigned int crypto_tfm_alg_blocksize(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_blocksize;
}

static inline unsigned int crypto_tfm_alg_alignmask(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_alignmask;
}

static inline u32 crypto_tfm_get_flags(struct crypto_tfm *tfm)
{
	return tfm->crt_flags;
}

static inline void crypto_tfm_set_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags |= flags;
}

static inline void crypto_tfm_clear_flags(struct crypto_tfm *tfm, u32 flags)
{
	tfm->crt_flags &= ~flags;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

static inline unsigned int crypto_tfm_ctx_alignment(void)
{
	struct crypto_tfm *tfm;
	return __alignof__(tfm->__crt_ctx);
}

/*
 * API wrappers.
 */
static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_ablkcipher *)tfm;
}

static inline u32 crypto_skcipher_type(u32 type)
{
	type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	return type;
}

static inline u32 crypto_skcipher_mask(u32 mask)
{
	mask &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
	mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
	return mask;
}

/**
 * DOC: Asynchronous Block Cipher API
 *
 * Asynchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
 *
 * Asynchronous cipher operations imply that the function invocation for a
 * cipher request returns immediately before the completion of the operation.
 * The cipher request is scheduled as a separate kernel thread and therefore
 * load-balanced on the different CPUs via the process scheduler. To allow
 * the kernel crypto API to inform the caller about the completion of a cipher
 * request, the caller must provide a callback function. That function is
 * invoked with the cipher handle when the request completes.
 *
 * To support the asynchronous operation, more information than just the
 * cipher handle must be supplied to the kernel crypto API. That additional
 * information is given by filling in the ablkcipher_request data structure.
 *
 * For the asynchronous block cipher API, the state is maintained with the tfm
 * cipher handle. A single tfm can be used across multiple calls and in
 * parallel. For asynchronous block cipher calls, context data supplied and
 * only used by the caller can be referenced in the request data structure in
 * addition to the IV used for the cipher request. The maintenance of such
 * state information would be important for a crypto driver implementer to
 * have, because when calling the callback function upon completion of the
 * cipher operation, that callback function may need some information about
 * which operation just finished if it invoked multiple requests in parallel.
 * This state information is unused by the kernel crypto API.
 */

static inline struct crypto_tfm *crypto_ablkcipher_tfm(
	struct crypto_ablkcipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_ablkcipher() - zeroize and free cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
{
	crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
}

/**
 * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	      ablkcipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the ablkcipher is known to the kernel crypto API; false
 *	   otherwise
 */
static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_has_alg(alg_name, crypto_skcipher_type(type),
			      crypto_skcipher_mask(mask));
}

static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
	struct crypto_ablkcipher *tfm)
{
	return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
}

/**
 * crypto_ablkcipher_ivsize() - obtain IV size
 * @tfm: cipher handle
 *
 * The size of the IV for the ablkcipher referenced by the cipher handle is
 * returned. This IV size may be zero if the cipher does not need an IV.
 *
 * Return: IV size in bytes
 */
static inline unsigned int crypto_ablkcipher_ivsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->ivsize;
}

/**
 * crypto_ablkcipher_blocksize() - obtain block size of cipher
 * @tfm: cipher handle
 *
 * The block size for the ablkcipher referenced with the cipher handle is
 * returned. The caller may use that information to allocate appropriate
 * memory for the data returned by the encryption or decryption operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_ablkcipher_blocksize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
}

static inline unsigned int crypto_ablkcipher_alignmask(
	struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
}

static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
{
	return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
}

static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
					       u32 flags)
{
	crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
}

static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
						 u32 flags)
{
	crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
}

/**
 * crypto_ablkcipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the ablkcipher referenced by the cipher
 * handle.
 *
 * Note, the key length determines the cipher type. Many block ciphers implement
 * different cipher modes depending on the key size, such as AES-128 vs AES-192
 * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
 * is performed.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
					   const u8 *key, unsigned int keylen)
{
	struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);

	return crt->setkey(crt->base, key, keylen);
}

/**
 * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
 * @req: ablkcipher_request out of which the cipher handle is to be obtained
 *
 * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
 * data structure.
 *
 * Return: crypto_ablkcipher handle
 */
static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
	struct ablkcipher_request *req)
{
	return __crypto_ablkcipher_cast(req->base.tfm);
}

/**
 * crypto_ablkcipher_encrypt() - encrypt plaintext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Encrypt plaintext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->encrypt(req);
}

/**
 * crypto_ablkcipher_decrypt() - decrypt ciphertext
 * @req: reference to the ablkcipher_request handle that holds all information
 *	 needed to perform the cipher operation
 *
 * Decrypt ciphertext data using the ablkcipher_request handle. That data
 * structure and how it is filled with data is discussed with the
 * ablkcipher_request_* functions.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct ablkcipher_tfm *crt =
		crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
	return crt->decrypt(req);
}

/**
 * DOC: Asynchronous Cipher Request Handle
 *
 * The ablkcipher_request data structure contains all pointers to data
 * required for the asynchronous cipher operation. This includes the cipher
 * handle (which can be used by multiple ablkcipher_request instances), pointer
 * to plaintext and ciphertext, asynchronous callback function, etc. It acts
 * as a handle to the ablkcipher_request_* API calls in a similar way as
 * ablkcipher handle to the crypto_ablkcipher_* API calls.
 */

/**
 * crypto_ablkcipher_reqsize() - obtain size of the request data structure
 * @tfm: cipher handle
 *
 * Return: number of bytes
 */
static inline unsigned int crypto_ablkcipher_reqsize(
	struct crypto_ablkcipher *tfm)
{
	return crypto_ablkcipher_crt(tfm)->reqsize;
}

/**
 * ablkcipher_request_set_tfm() - update cipher handle reference in request
 * @req: request handle to be modified
 * @tfm: cipher handle that shall be added to the request handle
 *
 * Allow the caller to replace the existing ablkcipher handle in the request
 * data structure with a different one.
 */
static inline void ablkcipher_request_set_tfm(
	struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
{
	req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
}

static inline struct ablkcipher_request *ablkcipher_request_cast(
	struct crypto_async_request *req)
{
	return container_of(req, struct ablkcipher_request, base);
}

/**
 * ablkcipher_request_alloc() - allocate request data structure
 * @tfm: cipher handle to be registered with the request
 * @gfp: memory allocation flag that is handed to kmalloc by the API call.
 *
 * Allocate the request data structure that must be used with the ablkcipher
 * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
 * handle is registered in the request data structure.
 *
 * Return: allocated request handle in case of success, or NULL if out of memory
 */
static inline struct ablkcipher_request *ablkcipher_request_alloc(
	struct crypto_ablkcipher *tfm, gfp_t gfp)
{
	struct ablkcipher_request *req;

	req = kmalloc(sizeof(struct ablkcipher_request) +
		      crypto_ablkcipher_reqsize(tfm), gfp);

	if (likely(req))
		ablkcipher_request_set_tfm(req, tfm);

	return req;
}

/**
 * ablkcipher_request_free() - zeroize and free request data structure
 * @req: request data structure cipher handle to be freed
 */
static inline void ablkcipher_request_free(struct ablkcipher_request *req)
{
	kzfree(req);
}

/**
 * ablkcipher_request_set_callback() - set asynchronous callback function
 * @req: request handle
 * @flags: specify zero or an ORing of the flags
 *	   CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
 *	   increase the wait queue beyond the initial maximum size;
 *	   CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
 * @compl: callback function pointer to be registered with the request handle
 * @data: The data pointer refers to memory that is not used by the kernel
 *	  crypto API, but provided to the callback function for it to use. Here,
 *	  the caller can provide a reference to memory the callback function can
 *	  operate on. As the callback function is invoked asynchronously to the
 *	  related functionality, it may need to access data structures of the
 *	  related functionality which can be referenced using this pointer. The
 *	  callback function can access the memory via the "data" field in the
 *	  crypto_async_request data structure provided to the callback function.
 *
 * This function allows setting the callback function that is triggered once the
 * cipher operation completes.
 *
 * The callback function is registered with the ablkcipher_request handle and
 * must comply with the following template::
 *
 *	void callback_function(struct crypto_async_request *req, int error)
 */
static inline void ablkcipher_request_set_callback(
	struct ablkcipher_request *req,
	u32 flags, crypto_completion_t compl, void *data)
{
	req->base.complete = compl;
	req->base.data = data;
	req->base.flags = flags;
}

/**
 * ablkcipher_request_set_crypt() - set data buffers
 * @req: request handle
 * @src: source scatter / gather list
 * @dst: destination scatter / gather list
 * @nbytes: number of bytes to process from @src
 * @iv: IV for the cipher operation which must comply with the IV size defined
 *	by crypto_ablkcipher_ivsize
 *
 * This function allows setting of the source data and destination data
 * scatter / gather lists.
 *
 * For encryption, the source is treated as the plaintext and the
 * destination is the ciphertext. For a decryption operation, the use is
 * reversed - the source is the ciphertext and the destination is the plaintext.
 */
static inline void ablkcipher_request_set_crypt(
	struct ablkcipher_request *req,
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int nbytes, void *iv)
{
	req->src = src;
	req->dst = dst;
	req->nbytes = nbytes;
	req->info = iv;
}

/**
 * DOC: Synchronous Block Cipher API
 *
 * The synchronous block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
 *
 * Synchronous calls have a context in the tfm. But since a single tfm can be
 * used in multiple calls and in parallel, this info should not be changeable
 * (unless a lock is used). This applies, for example, to the symmetric key.
 * However, the IV is changeable, so there is an iv field in the blkcipher_tfm
 * structure for the synchronous blkcipher API. So, it's the only state info
 * that can be kept for synchronous calls without using a big lock across a
 * tfm.
 *
 * The block cipher API allows the use of a complete cipher, i.e. a cipher
 * consisting of a template (a block chaining mode) and a single block cipher
 * primitive (e.g. AES).
 *
 * The plaintext data buffer and the ciphertext data buffer are pointed to
 * by using scatter/gather lists. The cipher operation is performed
 * on all segments of the provided scatter/gather lists.
 *
 * The kernel crypto API supports a cipher operation "in-place" which means that
 * the caller may provide the same scatter/gather list for the plaintext and
 * cipher text. After the completion of the cipher operation, the plaintext
 * data is replaced with the ciphertext data in case of an encryption and vice
 * versa for a decryption. The caller must ensure that the scatter/gather lists
 * for the output data point to sufficiently large buffers, i.e. multiples of
 * the block size of the cipher.
 */

5cde0af2 HX |
1053 | static inline struct crypto_blkcipher *__crypto_blkcipher_cast( |
1054 | struct crypto_tfm *tfm) | |
1055 | { | |
1056 | return (struct crypto_blkcipher *)tfm; | |
1057 | } | |
1058 | ||
1059 | static inline struct crypto_blkcipher *crypto_blkcipher_cast( | |
1060 | struct crypto_tfm *tfm) | |
1061 | { | |
1062 | BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER); | |
1063 | return __crypto_blkcipher_cast(tfm); | |
1064 | } | |
1065 | ||
58284f0d SM |
1066 | /** |
1067 | * crypto_alloc_blkcipher() - allocate synchronous block cipher handle | |
1068 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | |
1069 | * blkcipher cipher | |
1070 | * @type: specifies the type of the cipher | |
1071 | * @mask: specifies the mask for the cipher | |
1072 | * | |
1073 | * Allocate a cipher handle for a block cipher. The returned struct | |
1074 | * crypto_blkcipher is the cipher handle that is required for any subsequent | |
1075 | * API invocation for that block cipher. | |
1076 | * | |
1077 | * Return: allocated cipher handle in case of success; IS_ERR() is true in case | |
1078 | * of an error, PTR_ERR() returns the error code. | |
1079 | */ | |
5cde0af2 HX |
1080 | static inline struct crypto_blkcipher *crypto_alloc_blkcipher( |
1081 | const char *alg_name, u32 type, u32 mask) | |
1082 | { | |
332f8840 | 1083 | type &= ~CRYPTO_ALG_TYPE_MASK; |
5cde0af2 | 1084 | type |= CRYPTO_ALG_TYPE_BLKCIPHER; |
332f8840 | 1085 | mask |= CRYPTO_ALG_TYPE_MASK; |
5cde0af2 HX |
1086 | |
1087 | return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask)); | |
1088 | } | |
1089 | ||
1090 | static inline struct crypto_tfm *crypto_blkcipher_tfm( | |
1091 | struct crypto_blkcipher *tfm) | |
1092 | { | |
1093 | return &tfm->base; | |
1094 | } | |
1095 | ||
58284f0d SM |
1096 | /** |
1097 | * crypto_free_blkcipher() - zeroize and free the block cipher handle | |
1098 | * @tfm: cipher handle to be freed | |
1099 | */ | |
5cde0af2 HX |
1100 | static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm) |
1101 | { | |
1102 | crypto_free_tfm(crypto_blkcipher_tfm(tfm)); | |
1103 | } | |
1104 | ||
58284f0d SM |
1105 | /** |
1106 | * crypto_has_blkcipher() - Search for the availability of a block cipher | |
1107 | * @alg_name: is the cra_name / name or cra_driver_name / driver name of the | |
1108 | * block cipher | |
1109 | * @type: specifies the type of the cipher | |
1110 | * @mask: specifies the mask for the cipher | |
1111 | * | |
1112 | * Return: true when the block cipher is known to the kernel crypto API; false | |
1113 | * otherwise | |
1114 | */ | |
fce32d70 HX |
1115 | static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask) |
1116 | { | |
332f8840 | 1117 | type &= ~CRYPTO_ALG_TYPE_MASK; |
fce32d70 | 1118 | type |= CRYPTO_ALG_TYPE_BLKCIPHER; |
332f8840 | 1119 | mask |= CRYPTO_ALG_TYPE_MASK; |
fce32d70 HX |
1120 | |
1121 | return crypto_has_alg(alg_name, type, mask); | |
1122 | } | |
1123 | ||
58284f0d SM |
1124 | /** |
1125 | * crypto_blkcipher_name() - return the name / cra_name from the cipher handle | |
1126 | * @tfm: cipher handle | |
1127 | * | |
1128 | * Return: The character string holding the name of the cipher | |
1129 | */ | |
5cde0af2 HX |
1130 | static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm) |
1131 | { | |
1132 | return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm)); | |
1133 | } | |
1134 | ||
1135 | static inline struct blkcipher_tfm *crypto_blkcipher_crt( | |
1136 | struct crypto_blkcipher *tfm) | |
1137 | { | |
1138 | return &crypto_blkcipher_tfm(tfm)->crt_blkcipher; | |
1139 | } | |
1140 | ||
1141 | static inline struct blkcipher_alg *crypto_blkcipher_alg( | |
1142 | struct crypto_blkcipher *tfm) | |
1143 | { | |
1144 | return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher; | |
1145 | } | |
1146 | ||
58284f0d SM |
1147 | /** |
1148 | * crypto_blkcipher_ivsize() - obtain IV size | |
1149 | * @tfm: cipher handle | |
1150 | * | |
1151 | * The size of the IV for the block cipher referenced by the cipher handle is | |
1152 | * returned. This IV size may be zero if the cipher does not need an IV. | |
1153 | * | |
1154 | * Return: IV size in bytes | |
1155 | */ | |
5cde0af2 HX |
1156 | static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm) |
1157 | { | |
1158 | return crypto_blkcipher_alg(tfm)->ivsize; | |
1159 | } | |
1160 | ||
58284f0d SM |
1161 | /** |
1162 | * crypto_blkcipher_blocksize() - obtain block size of cipher | |
1163 | * @tfm: cipher handle | |
1164 | * | |
1165 | * The block size for the block cipher referenced with the cipher handle is | |
1166 | * returned. The caller may use that information to allocate appropriate | |
1167 | * memory for the data returned by the encryption or decryption operation. | |
1168 | * | |
1169 | * Return: block size of cipher | |
1170 | */ | |
5cde0af2 HX |
1171 | static inline unsigned int crypto_blkcipher_blocksize( |
1172 | struct crypto_blkcipher *tfm) | |
1173 | { | |
1174 | return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm)); | |
1175 | } | |
1176 | ||
1177 | static inline unsigned int crypto_blkcipher_alignmask( | |
1178 | struct crypto_blkcipher *tfm) | |
1179 | { | |
1180 | return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm)); | |
1181 | } | |
1182 | ||
1183 | static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm) | |
1184 | { | |
1185 | return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm)); | |
1186 | } | |
1187 | ||
1188 | static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm, | |
1189 | u32 flags) | |
1190 | { | |
1191 | crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags); | |
1192 | } | |
1193 | ||
1194 | static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm, | |
1195 | u32 flags) | |
1196 | { | |
1197 | crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags); | |
1198 | } | |
1199 | ||
58284f0d SM |
1200 | /** |
1201 | * crypto_blkcipher_setkey() - set key for cipher | |
1202 | * @tfm: cipher handle | |
1203 | * @key: buffer holding the key | |
1204 | * @keylen: length of the key in bytes | |
1205 | * | |
1206 | * The caller provided key is set for the block cipher referenced by the cipher | |
1207 | * handle. | |
1208 | * | |
1209 | * Note, the key length determines the cipher type. Many block ciphers implement | |
1210 | * different cipher modes depending on the key size, such as AES-128 vs AES-192 | |
1211 | * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128 | |
1212 | * is performed. | |
1213 | * | |
1214 | * Return: 0 if the setting of the key was successful; < 0 if an error occurred | |
1215 | */ | |
5cde0af2 HX |
1216 | static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm, |
1217 | const u8 *key, unsigned int keylen) | |
1218 | { | |
1219 | return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm), | |
1220 | key, keylen); | |
1221 | } | |
1222 | ||
/**
 * crypto_blkcipher_encrypt() - encrypt plaintext
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt
 *
 * Encrypt plaintext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is
 * filled with the block cipher handle; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
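
/*
 * Example: encrypting a buffer in place with cbc(aes). This is a minimal
 * sketch, not part of the API proper; the "cbc(aes)" name, the in-place
 * operation and the coarse error handling are illustrative assumptions.
 * For a plain CBC cipher, len must be a multiple of the block size.
 *
 *	static int example_cbc_encrypt(const u8 *key, unsigned int keylen,
 *				       u8 *iv, u8 *buf, unsigned int len)
 *	{
 *		struct crypto_blkcipher *tfm;
 *		struct blkcipher_desc desc;
 *		struct scatterlist sg;
 *		int err;
 *
 *		tfm = crypto_alloc_blkcipher("cbc(aes)", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		err = crypto_blkcipher_setkey(tfm, key, keylen);
 *		if (err)
 *			goto out;
 *
 *		crypto_blkcipher_set_iv(tfm, iv, crypto_blkcipher_ivsize(tfm));
 *		sg_init_one(&sg, buf, len);
 *
 *		desc.tfm = tfm;
 *		desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *
 *		err = crypto_blkcipher_encrypt(&desc, &sg, &sg, len);
 *	out:
 *		crypto_free_blkcipher(tfm);
 *		return err;
 *	}
 */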

/**
 * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	ciphertext
 * @src: scatter/gather list that holds the plaintext
 * @nbytes: number of bytes of the plaintext to encrypt
 *
 * Encrypt plaintext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller and can
 * reside on the stack. The caller must fill desc as follows: desc.tfm is
 * filled with the block cipher handle; desc.info is filled with the IV to
 * be used for the current operation; desc.flags is filled with either
 * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}
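
/*
 * Example: the one-shot IV variant only differs in how desc is filled;
 * desc.info carries the IV for this call alone, and the IV state of the
 * handle is left untouched. A minimal sketch, assuming tfm, iv, sg and
 * len were prepared as in the previous example:
 *
 *	struct blkcipher_desc desc = {
 *		.tfm	= tfm,
 *		.info	= iv,
 *		.flags	= CRYPTO_TFM_REQ_MAY_SLEEP,
 *	};
 *
 *	err = crypto_blkcipher_encrypt_iv(&desc, &sg, &sg, len);
 */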

/**
 * crypto_blkcipher_decrypt() - decrypt ciphertext
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt
 *
 * Decrypt ciphertext data using the IV set by the caller with a preceding
 * call of crypto_blkcipher_set_iv.
 *
 * The blkcipher_desc data structure must be filled by the caller as
 * documented for the crypto_blkcipher_encrypt call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
					   struct scatterlist *dst,
					   struct scatterlist *src,
					   unsigned int nbytes)
{
	desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
 * @desc: reference to the block cipher handle with metadata
 * @dst: scatter/gather list that is filled by the cipher operation with the
 *	plaintext
 * @src: scatter/gather list that holds the ciphertext
 * @nbytes: number of bytes of the ciphertext to decrypt
 *
 * Decrypt ciphertext data with the use of an IV that is solely used for this
 * cipher operation. Any previously set IV is not used.
 *
 * The blkcipher_desc data structure must be filled by the caller as
 * documented for the crypto_blkcipher_encrypt_iv call above.
 *
 * Return: 0 if the cipher operation was successful; < 0 if an error occurred
 */
static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned int nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
}

/**
 * crypto_blkcipher_set_iv() - set IV for cipher
 * @tfm: cipher handle
 * @src: buffer holding the IV
 * @len: length of the IV in bytes
 *
 * The caller provided IV is set for the block cipher referenced by the
 * cipher handle.
 */
static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
					   const u8 *src, unsigned int len)
{
	memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
}

/**
 * crypto_blkcipher_get_iv() - obtain IV from cipher
 * @tfm: cipher handle
 * @dst: buffer filled with the IV
 * @len: length of the buffer dst
 *
 * The caller can obtain the IV set for the block cipher referenced by the
 * cipher handle and store it into the user-provided buffer. If the buffer
 * has insufficient space, the IV is truncated to fit the buffer.
 */
static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
					   u8 *dst, unsigned int len)
{
	memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
}

/**
 * DOC: Single Block Cipher API
 *
 * The single block cipher API is used with the ciphers of type
 * CRYPTO_ALG_TYPE_CIPHER (listed as type "cipher" in /proc/crypto).
 *
 * The single block cipher API provides operations on the basic cipher
 * primitive only. These cipher primitives exclude any block chaining
 * operations, including IV handling.
 *
 * The purpose of this single block cipher API is to support the
 * implementation of templates or other concepts that only need to perform
 * the cipher operation on one block at a time. Templates invoke the
 * underlying cipher primitive block-wise and process either the input or
 * the output data of these cipher operations.
 */

static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_cipher *)tfm;
}

static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
	return __crypto_cipher_cast(tfm);
}

/**
 * crypto_alloc_cipher() - allocate single block cipher handle
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Allocate a cipher handle for a single block cipher. The returned struct
 * crypto_cipher is the cipher handle that is required for any subsequent API
 * invocation for that single block cipher.
 *
 * Return: allocated cipher handle in case of success; IS_ERR() is true in case
 *	of an error, PTR_ERR() returns the error code.
 */
static inline struct crypto_cipher *crypto_alloc_cipher(const char *alg_name,
							u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_cipher_cast(crypto_alloc_base(alg_name, type, mask));
}
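
/*
 * Example: allocating a handle for the bare AES primitive and checking the
 * result (a minimal sketch; the "aes" name is assumed to be registered):
 *
 *	struct crypto_cipher *tfm = crypto_alloc_cipher("aes", 0, 0);
 *
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 */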

static inline struct crypto_tfm *crypto_cipher_tfm(struct crypto_cipher *tfm)
{
	return &tfm->base;
}

/**
 * crypto_free_cipher() - zeroize and free the single block cipher handle
 * @tfm: cipher handle to be freed
 */
static inline void crypto_free_cipher(struct crypto_cipher *tfm)
{
	crypto_free_tfm(crypto_cipher_tfm(tfm));
}

/**
 * crypto_has_cipher() - Search for the availability of a single block cipher
 * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
 *	single block cipher
 * @type: specifies the type of the cipher
 * @mask: specifies the mask for the cipher
 *
 * Return: true when the single block cipher is known to the kernel crypto API;
 *	false otherwise
 */
static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_CIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}
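
/*
 * Example: probing for an algorithm before committing to it (a sketch;
 * "serpent" is an arbitrary algorithm name chosen for illustration):
 *
 *	if (!crypto_has_cipher("serpent", 0, 0))
 *		return -ENOENT;
 */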

static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
{
	return &crypto_cipher_tfm(tfm)->crt_cipher;
}

/**
 * crypto_cipher_blocksize() - obtain block size for cipher
 * @tfm: cipher handle
 *
 * The block size for the single block cipher referenced with the cipher
 * handle tfm is returned. The caller may use that information to allocate
 * appropriate memory for the data returned by the encryption or decryption
 * operation.
 *
 * Return: block size of cipher
 */
static inline unsigned int crypto_cipher_blocksize(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_blocksize(crypto_cipher_tfm(tfm));
}

static inline unsigned int crypto_cipher_alignmask(struct crypto_cipher *tfm)
{
	return crypto_tfm_alg_alignmask(crypto_cipher_tfm(tfm));
}

static inline u32 crypto_cipher_get_flags(struct crypto_cipher *tfm)
{
	return crypto_tfm_get_flags(crypto_cipher_tfm(tfm));
}

static inline void crypto_cipher_set_flags(struct crypto_cipher *tfm,
					   u32 flags)
{
	crypto_tfm_set_flags(crypto_cipher_tfm(tfm), flags);
}

static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
					     u32 flags)
{
	crypto_tfm_clear_flags(crypto_cipher_tfm(tfm), flags);
}

/**
 * crypto_cipher_setkey() - set key for cipher
 * @tfm: cipher handle
 * @key: buffer holding the key
 * @keylen: length of the key in bytes
 *
 * The caller provided key is set for the single block cipher referenced by
 * the cipher handle.
 *
 * Note that the key length selects the cipher variant. Many block ciphers
 * support several key sizes, such as AES-128, AES-192 and AES-256;
 * providing a 16 byte key to an AES cipher handle selects AES-128.
 *
 * Return: 0 if the setting of the key was successful; < 0 if an error occurred
 */
static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
						  key, keylen);
}

/**
 * crypto_cipher_encrypt_one() - encrypt one block of plaintext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the ciphertext
 * @src: buffer holding the plaintext to be encrypted
 *
 * Invoke the encryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}

/**
 * crypto_cipher_decrypt_one() - decrypt one block of ciphertext
 * @tfm: cipher handle
 * @dst: points to the buffer that will be filled with the plaintext
 * @src: buffer holding the ciphertext to be decrypted
 *
 * Invoke the decryption operation of one block. The caller must ensure that
 * the plaintext and ciphertext buffers are at least one block in size.
 */
static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
					     u8 *dst, const u8 *src)
{
	crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
						dst, src);
}
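
/*
 * Example: round-tripping a single AES block (a minimal sketch; the key
 * and block contents as well as the in-place decryption are illustrative
 * assumptions; both buffers must span at least
 * crypto_cipher_blocksize(tfm) bytes):
 *
 *	static int example_one_block(const u8 *key, unsigned int keylen,
 *				     const u8 *in, u8 *out)
 *	{
 *		struct crypto_cipher *tfm;
 *		int err;
 *
 *		tfm = crypto_alloc_cipher("aes", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		err = crypto_cipher_setkey(tfm, key, keylen);
 *		if (!err) {
 *			crypto_cipher_encrypt_one(tfm, out, in);
 *			crypto_cipher_decrypt_one(tfm, out, out);
 *		}
 *
 *		crypto_free_cipher(tfm);
 *		return err;
 *	}
 */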

static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
	return (struct crypto_comp *)tfm;
}

static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
{
	BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
	       CRYPTO_ALG_TYPE_MASK);
	return __crypto_comp_cast(tfm);
}

static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
						    u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_comp_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline struct crypto_tfm *crypto_comp_tfm(struct crypto_comp *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_comp(struct crypto_comp *tfm)
{
	crypto_free_tfm(crypto_comp_tfm(tfm));
}

static inline int crypto_has_comp(const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_COMPRESS;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return crypto_has_alg(alg_name, type, mask);
}

static inline const char *crypto_comp_name(struct crypto_comp *tfm)
{
	return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}

static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
{
	return &crypto_comp_tfm(tfm)->crt_compress;
}

static inline int crypto_comp_compress(struct crypto_comp *tfm,
				       const u8 *src, unsigned int slen,
				       u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
						  src, slen, dst, dlen);
}

static inline int crypto_comp_decompress(struct crypto_comp *tfm,
					 const u8 *src, unsigned int slen,
					 u8 *dst, unsigned int *dlen)
{
	return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
						    src, slen, dst, dlen);
}
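
/*
 * Example: one-shot compression of a buffer (a minimal sketch; "deflate"
 * is an assumed algorithm name; dlen must hold the capacity of dst on
 * entry and is updated to the compressed length on success):
 *
 *	static int example_compress(const u8 *src, unsigned int slen,
 *				    u8 *dst, unsigned int *dlen)
 *	{
 *		struct crypto_comp *tfm;
 *		int err;
 *
 *		tfm = crypto_alloc_comp("deflate", 0, 0);
 *		if (IS_ERR(tfm))
 *			return PTR_ERR(tfm);
 *
 *		err = crypto_comp_compress(tfm, src, slen, dst, dlen);
 *		crypto_free_comp(tfm);
 *		return err;
 *	}
 */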

#endif /* _LINUX_CRYPTO_H */