/* Crypto user statistics API — from kernel commit cac5818c. */
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | |
3 | * Crypto user configuration API. | |
4 | * | |
5 | * Copyright (C) 2017-2018 Corentin Labbe <[email protected]> | |
6 | * | |
7 | */ | |
8 | ||
9 | #include <linux/crypto.h> | |
10 | #include <linux/cryptouser.h> | |
11 | #include <linux/sched.h> | |
12 | #include <net/netlink.h> | |
13 | #include <crypto/internal/skcipher.h> | |
14 | #include <crypto/internal/rng.h> | |
15 | #include <crypto/akcipher.h> | |
16 | #include <crypto/kpp.h> | |
17 | #include <crypto/internal/cryptouser.h> | |
18 | ||
19 | #include "internal.h" | |
20 | ||
/* True iff the fixed-size char array @x contains a NUL terminator. */
#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
22 | ||
23 | static DEFINE_MUTEX(crypto_cfg_mutex); | |
24 | ||
25 | extern struct sock *crypto_nlsk; | |
26 | ||
/*
 * State threaded through the netlink statistics reporting helpers:
 * carries the request skb (for the sender's portid), the reply skb
 * being filled, and the header fields to stamp on each message.
 */
struct crypto_dump_info {
	struct sk_buff *in_skb;		/* request skb; source of NETLINK_CB portid */
	struct sk_buff *out_skb;	/* reply skb being built */
	u32 nlmsg_seq;			/* sequence number for the reply header */
	u16 nlmsg_flags;		/* e.g. NLM_F_MULTI for dump replies */
};
33 | ||
34 | static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg) | |
35 | { | |
36 | struct crypto_stat raead; | |
37 | u64 v64; | |
38 | u32 v32; | |
39 | ||
40 | strncpy(raead.type, "aead", sizeof(raead.type)); | |
41 | ||
42 | v32 = atomic_read(&alg->encrypt_cnt); | |
43 | raead.stat_encrypt_cnt = v32; | |
44 | v64 = atomic64_read(&alg->encrypt_tlen); | |
45 | raead.stat_encrypt_tlen = v64; | |
46 | v32 = atomic_read(&alg->decrypt_cnt); | |
47 | raead.stat_decrypt_cnt = v32; | |
48 | v64 = atomic64_read(&alg->decrypt_tlen); | |
49 | raead.stat_decrypt_tlen = v64; | |
50 | v32 = atomic_read(&alg->aead_err_cnt); | |
51 | raead.stat_aead_err_cnt = v32; | |
52 | ||
53 | if (nla_put(skb, CRYPTOCFGA_STAT_AEAD, | |
54 | sizeof(struct crypto_stat), &raead)) | |
55 | goto nla_put_failure; | |
56 | return 0; | |
57 | ||
58 | nla_put_failure: | |
59 | return -EMSGSIZE; | |
60 | } | |
61 | ||
62 | static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg) | |
63 | { | |
64 | struct crypto_stat rcipher; | |
65 | u64 v64; | |
66 | u32 v32; | |
67 | ||
68 | strlcpy(rcipher.type, "cipher", sizeof(rcipher.type)); | |
69 | ||
70 | v32 = atomic_read(&alg->encrypt_cnt); | |
71 | rcipher.stat_encrypt_cnt = v32; | |
72 | v64 = atomic64_read(&alg->encrypt_tlen); | |
73 | rcipher.stat_encrypt_tlen = v64; | |
74 | v32 = atomic_read(&alg->decrypt_cnt); | |
75 | rcipher.stat_decrypt_cnt = v32; | |
76 | v64 = atomic64_read(&alg->decrypt_tlen); | |
77 | rcipher.stat_decrypt_tlen = v64; | |
78 | v32 = atomic_read(&alg->cipher_err_cnt); | |
79 | rcipher.stat_cipher_err_cnt = v32; | |
80 | ||
81 | if (nla_put(skb, CRYPTOCFGA_STAT_CIPHER, | |
82 | sizeof(struct crypto_stat), &rcipher)) | |
83 | goto nla_put_failure; | |
84 | return 0; | |
85 | ||
86 | nla_put_failure: | |
87 | return -EMSGSIZE; | |
88 | } | |
89 | ||
90 | static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg) | |
91 | { | |
92 | struct crypto_stat rcomp; | |
93 | u64 v64; | |
94 | u32 v32; | |
95 | ||
96 | strlcpy(rcomp.type, "compression", sizeof(rcomp.type)); | |
97 | v32 = atomic_read(&alg->compress_cnt); | |
98 | rcomp.stat_compress_cnt = v32; | |
99 | v64 = atomic64_read(&alg->compress_tlen); | |
100 | rcomp.stat_compress_tlen = v64; | |
101 | v32 = atomic_read(&alg->decompress_cnt); | |
102 | rcomp.stat_decompress_cnt = v32; | |
103 | v64 = atomic64_read(&alg->decompress_tlen); | |
104 | rcomp.stat_decompress_tlen = v64; | |
105 | v32 = atomic_read(&alg->cipher_err_cnt); | |
106 | rcomp.stat_compress_err_cnt = v32; | |
107 | ||
108 | if (nla_put(skb, CRYPTOCFGA_STAT_COMPRESS, | |
109 | sizeof(struct crypto_stat), &rcomp)) | |
110 | goto nla_put_failure; | |
111 | return 0; | |
112 | ||
113 | nla_put_failure: | |
114 | return -EMSGSIZE; | |
115 | } | |
116 | ||
117 | static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg) | |
118 | { | |
119 | struct crypto_stat racomp; | |
120 | u64 v64; | |
121 | u32 v32; | |
122 | ||
123 | strlcpy(racomp.type, "acomp", sizeof(racomp.type)); | |
124 | v32 = atomic_read(&alg->compress_cnt); | |
125 | racomp.stat_compress_cnt = v32; | |
126 | v64 = atomic64_read(&alg->compress_tlen); | |
127 | racomp.stat_compress_tlen = v64; | |
128 | v32 = atomic_read(&alg->decompress_cnt); | |
129 | racomp.stat_decompress_cnt = v32; | |
130 | v64 = atomic64_read(&alg->decompress_tlen); | |
131 | racomp.stat_decompress_tlen = v64; | |
132 | v32 = atomic_read(&alg->cipher_err_cnt); | |
133 | racomp.stat_compress_err_cnt = v32; | |
134 | ||
135 | if (nla_put(skb, CRYPTOCFGA_STAT_ACOMP, | |
136 | sizeof(struct crypto_stat), &racomp)) | |
137 | goto nla_put_failure; | |
138 | return 0; | |
139 | ||
140 | nla_put_failure: | |
141 | return -EMSGSIZE; | |
142 | } | |
143 | ||
144 | static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg) | |
145 | { | |
146 | struct crypto_stat rakcipher; | |
147 | u64 v64; | |
148 | u32 v32; | |
149 | ||
150 | strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type)); | |
151 | v32 = atomic_read(&alg->encrypt_cnt); | |
152 | rakcipher.stat_encrypt_cnt = v32; | |
153 | v64 = atomic64_read(&alg->encrypt_tlen); | |
154 | rakcipher.stat_encrypt_tlen = v64; | |
155 | v32 = atomic_read(&alg->decrypt_cnt); | |
156 | rakcipher.stat_decrypt_cnt = v32; | |
157 | v64 = atomic64_read(&alg->decrypt_tlen); | |
158 | rakcipher.stat_decrypt_tlen = v64; | |
159 | v32 = atomic_read(&alg->sign_cnt); | |
160 | rakcipher.stat_sign_cnt = v32; | |
161 | v32 = atomic_read(&alg->verify_cnt); | |
162 | rakcipher.stat_verify_cnt = v32; | |
163 | v32 = atomic_read(&alg->akcipher_err_cnt); | |
164 | rakcipher.stat_akcipher_err_cnt = v32; | |
165 | ||
166 | if (nla_put(skb, CRYPTOCFGA_STAT_AKCIPHER, | |
167 | sizeof(struct crypto_stat), &rakcipher)) | |
168 | goto nla_put_failure; | |
169 | return 0; | |
170 | ||
171 | nla_put_failure: | |
172 | return -EMSGSIZE; | |
173 | } | |
174 | ||
175 | static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg) | |
176 | { | |
177 | struct crypto_stat rkpp; | |
178 | u32 v; | |
179 | ||
180 | strlcpy(rkpp.type, "kpp", sizeof(rkpp.type)); | |
181 | ||
182 | v = atomic_read(&alg->setsecret_cnt); | |
183 | rkpp.stat_setsecret_cnt = v; | |
184 | v = atomic_read(&alg->generate_public_key_cnt); | |
185 | rkpp.stat_generate_public_key_cnt = v; | |
186 | v = atomic_read(&alg->compute_shared_secret_cnt); | |
187 | rkpp.stat_compute_shared_secret_cnt = v; | |
188 | v = atomic_read(&alg->kpp_err_cnt); | |
189 | rkpp.stat_kpp_err_cnt = v; | |
190 | ||
191 | if (nla_put(skb, CRYPTOCFGA_STAT_KPP, | |
192 | sizeof(struct crypto_stat), &rkpp)) | |
193 | goto nla_put_failure; | |
194 | return 0; | |
195 | ||
196 | nla_put_failure: | |
197 | return -EMSGSIZE; | |
198 | } | |
199 | ||
200 | static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg) | |
201 | { | |
202 | struct crypto_stat rhash; | |
203 | u64 v64; | |
204 | u32 v32; | |
205 | ||
206 | strncpy(rhash.type, "ahash", sizeof(rhash.type)); | |
207 | ||
208 | v32 = atomic_read(&alg->hash_cnt); | |
209 | rhash.stat_hash_cnt = v32; | |
210 | v64 = atomic64_read(&alg->hash_tlen); | |
211 | rhash.stat_hash_tlen = v64; | |
212 | v32 = atomic_read(&alg->hash_err_cnt); | |
213 | rhash.stat_hash_err_cnt = v32; | |
214 | ||
215 | if (nla_put(skb, CRYPTOCFGA_STAT_HASH, | |
216 | sizeof(struct crypto_stat), &rhash)) | |
217 | goto nla_put_failure; | |
218 | return 0; | |
219 | ||
220 | nla_put_failure: | |
221 | return -EMSGSIZE; | |
222 | } | |
223 | ||
224 | static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg) | |
225 | { | |
226 | struct crypto_stat rhash; | |
227 | u64 v64; | |
228 | u32 v32; | |
229 | ||
230 | strncpy(rhash.type, "shash", sizeof(rhash.type)); | |
231 | ||
232 | v32 = atomic_read(&alg->hash_cnt); | |
233 | rhash.stat_hash_cnt = v32; | |
234 | v64 = atomic64_read(&alg->hash_tlen); | |
235 | rhash.stat_hash_tlen = v64; | |
236 | v32 = atomic_read(&alg->hash_err_cnt); | |
237 | rhash.stat_hash_err_cnt = v32; | |
238 | ||
239 | if (nla_put(skb, CRYPTOCFGA_STAT_HASH, | |
240 | sizeof(struct crypto_stat), &rhash)) | |
241 | goto nla_put_failure; | |
242 | return 0; | |
243 | ||
244 | nla_put_failure: | |
245 | return -EMSGSIZE; | |
246 | } | |
247 | ||
248 | static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg) | |
249 | { | |
250 | struct crypto_stat rrng; | |
251 | u64 v64; | |
252 | u32 v32; | |
253 | ||
254 | strncpy(rrng.type, "rng", sizeof(rrng.type)); | |
255 | ||
256 | v32 = atomic_read(&alg->generate_cnt); | |
257 | rrng.stat_generate_cnt = v32; | |
258 | v64 = atomic64_read(&alg->generate_tlen); | |
259 | rrng.stat_generate_tlen = v64; | |
260 | v32 = atomic_read(&alg->seed_cnt); | |
261 | rrng.stat_seed_cnt = v32; | |
262 | v32 = atomic_read(&alg->hash_err_cnt); | |
263 | rrng.stat_rng_err_cnt = v32; | |
264 | ||
265 | if (nla_put(skb, CRYPTOCFGA_STAT_RNG, | |
266 | sizeof(struct crypto_stat), &rrng)) | |
267 | goto nla_put_failure; | |
268 | return 0; | |
269 | ||
270 | nla_put_failure: | |
271 | return -EMSGSIZE; | |
272 | } | |
273 | ||
274 | static int crypto_reportstat_one(struct crypto_alg *alg, | |
275 | struct crypto_user_alg *ualg, | |
276 | struct sk_buff *skb) | |
277 | { | |
278 | strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name)); | |
279 | strlcpy(ualg->cru_driver_name, alg->cra_driver_name, | |
280 | sizeof(ualg->cru_driver_name)); | |
281 | strlcpy(ualg->cru_module_name, module_name(alg->cra_module), | |
282 | sizeof(ualg->cru_module_name)); | |
283 | ||
284 | ualg->cru_type = 0; | |
285 | ualg->cru_mask = 0; | |
286 | ualg->cru_flags = alg->cra_flags; | |
287 | ualg->cru_refcnt = refcount_read(&alg->cra_refcnt); | |
288 | ||
289 | if (nla_put_u32(skb, CRYPTOCFGA_PRIORITY_VAL, alg->cra_priority)) | |
290 | goto nla_put_failure; | |
291 | if (alg->cra_flags & CRYPTO_ALG_LARVAL) { | |
292 | struct crypto_stat rl; | |
293 | ||
294 | strlcpy(rl.type, "larval", sizeof(rl.type)); | |
295 | if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL, | |
296 | sizeof(struct crypto_stat), &rl)) | |
297 | goto nla_put_failure; | |
298 | goto out; | |
299 | } | |
300 | ||
301 | switch (alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL)) { | |
302 | case CRYPTO_ALG_TYPE_AEAD: | |
303 | if (crypto_report_aead(skb, alg)) | |
304 | goto nla_put_failure; | |
305 | break; | |
306 | case CRYPTO_ALG_TYPE_SKCIPHER: | |
307 | if (crypto_report_cipher(skb, alg)) | |
308 | goto nla_put_failure; | |
309 | break; | |
310 | case CRYPTO_ALG_TYPE_BLKCIPHER: | |
311 | if (crypto_report_cipher(skb, alg)) | |
312 | goto nla_put_failure; | |
313 | break; | |
314 | case CRYPTO_ALG_TYPE_CIPHER: | |
315 | if (crypto_report_cipher(skb, alg)) | |
316 | goto nla_put_failure; | |
317 | break; | |
318 | case CRYPTO_ALG_TYPE_COMPRESS: | |
319 | if (crypto_report_comp(skb, alg)) | |
320 | goto nla_put_failure; | |
321 | break; | |
322 | case CRYPTO_ALG_TYPE_ACOMPRESS: | |
323 | if (crypto_report_acomp(skb, alg)) | |
324 | goto nla_put_failure; | |
325 | break; | |
326 | case CRYPTO_ALG_TYPE_SCOMPRESS: | |
327 | if (crypto_report_acomp(skb, alg)) | |
328 | goto nla_put_failure; | |
329 | break; | |
330 | case CRYPTO_ALG_TYPE_AKCIPHER: | |
331 | if (crypto_report_akcipher(skb, alg)) | |
332 | goto nla_put_failure; | |
333 | break; | |
334 | case CRYPTO_ALG_TYPE_KPP: | |
335 | if (crypto_report_kpp(skb, alg)) | |
336 | goto nla_put_failure; | |
337 | break; | |
338 | case CRYPTO_ALG_TYPE_AHASH: | |
339 | if (crypto_report_ahash(skb, alg)) | |
340 | goto nla_put_failure; | |
341 | break; | |
342 | case CRYPTO_ALG_TYPE_HASH: | |
343 | if (crypto_report_shash(skb, alg)) | |
344 | goto nla_put_failure; | |
345 | break; | |
346 | case CRYPTO_ALG_TYPE_RNG: | |
347 | if (crypto_report_rng(skb, alg)) | |
348 | goto nla_put_failure; | |
349 | break; | |
350 | default: | |
351 | pr_err("ERROR: Unhandled alg %d in %s\n", | |
352 | alg->cra_flags & (CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_LARVAL), | |
353 | __func__); | |
354 | } | |
355 | ||
356 | out: | |
357 | return 0; | |
358 | ||
359 | nla_put_failure: | |
360 | return -EMSGSIZE; | |
361 | } | |
362 | ||
363 | static int crypto_reportstat_alg(struct crypto_alg *alg, | |
364 | struct crypto_dump_info *info) | |
365 | { | |
366 | struct sk_buff *in_skb = info->in_skb; | |
367 | struct sk_buff *skb = info->out_skb; | |
368 | struct nlmsghdr *nlh; | |
369 | struct crypto_user_alg *ualg; | |
370 | int err = 0; | |
371 | ||
372 | nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, info->nlmsg_seq, | |
373 | CRYPTO_MSG_GETSTAT, sizeof(*ualg), info->nlmsg_flags); | |
374 | if (!nlh) { | |
375 | err = -EMSGSIZE; | |
376 | goto out; | |
377 | } | |
378 | ||
379 | ualg = nlmsg_data(nlh); | |
380 | ||
381 | err = crypto_reportstat_one(alg, ualg, skb); | |
382 | if (err) { | |
383 | nlmsg_cancel(skb, nlh); | |
384 | goto out; | |
385 | } | |
386 | ||
387 | nlmsg_end(skb, nlh); | |
388 | ||
389 | out: | |
390 | return err; | |
391 | } | |
392 | ||
393 | int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh, | |
394 | struct nlattr **attrs) | |
395 | { | |
396 | struct crypto_user_alg *p = nlmsg_data(in_nlh); | |
397 | struct crypto_alg *alg; | |
398 | struct sk_buff *skb; | |
399 | struct crypto_dump_info info; | |
400 | int err; | |
401 | ||
402 | if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name)) | |
403 | return -EINVAL; | |
404 | ||
405 | alg = crypto_alg_match(p, 0); | |
406 | if (!alg) | |
407 | return -ENOENT; | |
408 | ||
409 | err = -ENOMEM; | |
410 | skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); | |
411 | if (!skb) | |
412 | goto drop_alg; | |
413 | ||
414 | info.in_skb = in_skb; | |
415 | info.out_skb = skb; | |
416 | info.nlmsg_seq = in_nlh->nlmsg_seq; | |
417 | info.nlmsg_flags = 0; | |
418 | ||
419 | err = crypto_reportstat_alg(alg, &info); | |
420 | ||
421 | drop_alg: | |
422 | crypto_mod_put(alg); | |
423 | ||
424 | if (err) | |
425 | return err; | |
426 | ||
427 | return nlmsg_unicast(crypto_nlsk, skb, NETLINK_CB(in_skb).portid); | |
428 | } | |
429 | ||
430 | int crypto_dump_reportstat(struct sk_buff *skb, struct netlink_callback *cb) | |
431 | { | |
432 | struct crypto_alg *alg; | |
433 | struct crypto_dump_info info; | |
434 | int err; | |
435 | ||
436 | if (cb->args[0]) | |
437 | goto out; | |
438 | ||
439 | cb->args[0] = 1; | |
440 | ||
441 | info.in_skb = cb->skb; | |
442 | info.out_skb = skb; | |
443 | info.nlmsg_seq = cb->nlh->nlmsg_seq; | |
444 | info.nlmsg_flags = NLM_F_MULTI; | |
445 | ||
446 | list_for_each_entry(alg, &crypto_alg_list, cra_list) { | |
447 | err = crypto_reportstat_alg(alg, &info); | |
448 | if (err) | |
449 | goto out_err; | |
450 | } | |
451 | ||
452 | out: | |
453 | return skb->len; | |
454 | out_err: | |
455 | return err; | |
456 | } | |
457 | ||
/* Dump-done callback: nothing to clean up for statistics dumps. */
int crypto_dump_reportstat_done(struct netlink_callback *cb)
{
	return 0;
}
462 | ||
463 | MODULE_LICENSE("GPL"); |