[linux.git] drivers/s390/crypto/zcrypt_api.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  *  Copyright IBM Corp. 2001, 2018
4  *  Author(s): Robert Burroughs
5  *             Eric Rossman ([email protected])
6  *             Cornelia Huck <[email protected]>
7  *
8  *  Hotplug & misc device support: Jochen Roehrig ([email protected])
9  *  Major cleanup & driver split: Martin Schwidefsky <[email protected]>
10  *                                Ralph Wuerthner <[email protected]>
11  *  MSGTYPE restruct:             Holger Dengler <[email protected]>
12  *  Multiple device nodes: Harald Freudenberger <[email protected]>
13  */
14
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/miscdevice.h>
19 #include <linux/fs.h>
20 #include <linux/compat.h>
21 #include <linux/slab.h>
22 #include <linux/atomic.h>
23 #include <linux/uaccess.h>
24 #include <linux/hw_random.h>
25 #include <linux/debugfs.h>
26 #include <linux/cdev.h>
27 #include <linux/ctype.h>
28 #include <linux/capability.h>
29 #include <asm/debug.h>
30
31 #define CREATE_TRACE_POINTS
32 #include <asm/trace/zcrypt.h>
33
34 #include "zcrypt_api.h"
35 #include "zcrypt_debug.h"
36
37 #include "zcrypt_msgtype6.h"
38 #include "zcrypt_msgtype50.h"
39 #include "zcrypt_ccamisc.h"
40 #include "zcrypt_ep11misc.h"
41
42 /*
43  * Module description.
44  */
45 MODULE_AUTHOR("IBM Corporation");
46 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
47                    "Copyright IBM Corp. 2001, 2012");
48 MODULE_LICENSE("GPL");
49
50 /*
51  * zcrypt tracepoint functions
52  */
53 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
54 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
55
56 DEFINE_SPINLOCK(zcrypt_list_lock);
57 LIST_HEAD(zcrypt_card_list);
58
59 static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
60 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
61
62 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
63 EXPORT_SYMBOL(zcrypt_rescan_req);
64
65 static LIST_HEAD(zcrypt_ops_list);
66
67 /* Zcrypt related debug feature stuff. */
68 debug_info_t *zcrypt_dbf_info;
69
70 /*
71  * Process a rescan of the transport layer.
72  *
73  * Returns 1 if the rescan has been processed, otherwise 0.
74  */
75 static inline int zcrypt_process_rescan(void)
76 {
77         if (atomic_read(&zcrypt_rescan_req)) {
78                 atomic_set(&zcrypt_rescan_req, 0);
79                 atomic_inc(&zcrypt_rescan_count);
80                 ap_bus_force_rescan();
81                 ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__,
82                                 atomic_inc_return(&zcrypt_rescan_count));
83                 return 1;
84         }
85         return 0;
86 }
87
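/*
 * zcrypt_msgtype_register() / zcrypt_msgtype_unregister():
 * Add a message type ops struct to or remove it from the list of
 * registered zcrypt message types.
 */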
88 void zcrypt_msgtype_register(struct zcrypt_ops *zops)
89 {
90         list_add_tail(&zops->list, &zcrypt_ops_list);
91 }
92
93 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
94 {
95         list_del_init(&zops->list);
96 }
97
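/*
 * zcrypt_msgtype(): Look up the registered message type ops matching
 * the given name and variant, or return NULL if there is none.
 */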
98 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
99 {
100         struct zcrypt_ops *zops;
101
102         list_for_each_entry(zops, &zcrypt_ops_list, list)
103                 if (zops->variant == variant &&
104                     (!strncmp(zops->name, name, sizeof(zops->name))))
105                         return zops;
106         return NULL;
107 }
108 EXPORT_SYMBOL(zcrypt_msgtype);
109
110 /*
111  * Multi device nodes extension functions.
112  */
113
114 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
115
116 struct zcdn_device;
117
118 static struct class *zcrypt_class;
119 static dev_t zcrypt_devt;
120 static struct cdev zcrypt_cdev;
121
122 struct zcdn_device {
123         struct device device;
124         struct ap_perms perms;
125 };
126
127 #define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)
128
129 #define ZCDN_MAX_NAME 32
130
131 static int zcdn_create(const char *name);
132 static int zcdn_destroy(const char *name);
133
134 /*
135  * Find zcdn device by name.
136  * Returns reference to the zcdn device which needs to be released
137  * with put_device() after use.
138  */
139 static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
140 {
141         struct device *dev = class_find_device_by_name(zcrypt_class, name);
142
143         return dev ? to_zcdn_dev(dev) : NULL;
144 }
145
146 /*
147  * Find zcdn device by devt value.
148  * Returns reference to the zcdn device which needs to be released
149  * with put_device() after use.
150  */
151 static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
152 {
153         struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
154
155         return dev ? to_zcdn_dev(dev) : NULL;
156 }
157
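/*
 * Sysfs attribute show/store handlers for a zcdn device: the ioctl,
 * adapter (ap), usage domain (aq) and control domain (ad) permission
 * masks, guarded by ap_perms_mutex.
 */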
158 static ssize_t ioctlmask_show(struct device *dev,
159                               struct device_attribute *attr,
160                               char *buf)
161 {
162         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
163         int i, n;
164
165         if (mutex_lock_interruptible(&ap_perms_mutex))
166                 return -ERESTARTSYS;
167
168         n = sysfs_emit(buf, "0x");
169         for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
170                 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
171         n += sysfs_emit_at(buf, n, "\n");
172
173         mutex_unlock(&ap_perms_mutex);
174
175         return n;
176 }
177
178 static ssize_t ioctlmask_store(struct device *dev,
179                                struct device_attribute *attr,
180                                const char *buf, size_t count)
181 {
182         int rc;
183         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
184
185         rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
186                                AP_IOCTLS, &ap_perms_mutex);
187         if (rc)
188                 return rc;
189
190         return count;
191 }
192
193 static DEVICE_ATTR_RW(ioctlmask);
194
195 static ssize_t apmask_show(struct device *dev,
196                            struct device_attribute *attr,
197                            char *buf)
198 {
199         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
200         int i, n;
201
202         if (mutex_lock_interruptible(&ap_perms_mutex))
203                 return -ERESTARTSYS;
204
205         n = sysfs_emit(buf, "0x");
206         for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
207                 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
208         n += sysfs_emit_at(buf, n, "\n");
209
210         mutex_unlock(&ap_perms_mutex);
211
212         return n;
213 }
214
215 static ssize_t apmask_store(struct device *dev,
216                             struct device_attribute *attr,
217                             const char *buf, size_t count)
218 {
219         int rc;
220         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
221
222         rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
223                                AP_DEVICES, &ap_perms_mutex);
224         if (rc)
225                 return rc;
226
227         return count;
228 }
229
230 static DEVICE_ATTR_RW(apmask);
231
232 static ssize_t aqmask_show(struct device *dev,
233                            struct device_attribute *attr,
234                            char *buf)
235 {
236         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
237         int i, n;
238
239         if (mutex_lock_interruptible(&ap_perms_mutex))
240                 return -ERESTARTSYS;
241
242         n = sysfs_emit(buf, "0x");
243         for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
244                 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
245         n += sysfs_emit_at(buf, n, "\n");
246
247         mutex_unlock(&ap_perms_mutex);
248
249         return n;
250 }
251
252 static ssize_t aqmask_store(struct device *dev,
253                             struct device_attribute *attr,
254                             const char *buf, size_t count)
255 {
256         int rc;
257         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
258
259         rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
260                                AP_DOMAINS, &ap_perms_mutex);
261         if (rc)
262                 return rc;
263
264         return count;
265 }
266
267 static DEVICE_ATTR_RW(aqmask);
268
269 static ssize_t admask_show(struct device *dev,
270                            struct device_attribute *attr,
271                            char *buf)
272 {
273         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
274         int i, n;
275
276         if (mutex_lock_interruptible(&ap_perms_mutex))
277                 return -ERESTARTSYS;
278
279         n = sysfs_emit(buf, "0x");
280         for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
281                 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
282         n += sysfs_emit_at(buf, n, "\n");
283
284         mutex_unlock(&ap_perms_mutex);
285
286         return n;
287 }
288
289 static ssize_t admask_store(struct device *dev,
290                             struct device_attribute *attr,
291                             const char *buf, size_t count)
292 {
293         int rc;
294         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
295
296         rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
297                                AP_DOMAINS, &ap_perms_mutex);
298         if (rc)
299                 return rc;
300
301         return count;
302 }
303
304 static DEVICE_ATTR_RW(admask);
305
306 static struct attribute *zcdn_dev_attrs[] = {
307         &dev_attr_ioctlmask.attr,
308         &dev_attr_apmask.attr,
309         &dev_attr_aqmask.attr,
310         &dev_attr_admask.attr,
311         NULL
312 };
313
314 static struct attribute_group zcdn_dev_attr_group = {
315         .attrs = zcdn_dev_attrs
316 };
317
318 static const struct attribute_group *zcdn_dev_attr_groups[] = {
319         &zcdn_dev_attr_group,
320         NULL
321 };
322
323 static ssize_t zcdn_create_store(const struct class *class,
324                                  const struct class_attribute *attr,
325                                  const char *buf, size_t count)
326 {
327         int rc;
328         char name[ZCDN_MAX_NAME];
329
330         strscpy(name, skip_spaces(buf), sizeof(name));
331
332         rc = zcdn_create(strim(name));
333
334         return rc ? rc : count;
335 }
336
337 static const struct class_attribute class_attr_zcdn_create =
338         __ATTR(create, 0600, NULL, zcdn_create_store);
339
340 static ssize_t zcdn_destroy_store(const struct class *class,
341                                   const struct class_attribute *attr,
342                                   const char *buf, size_t count)
343 {
344         int rc;
345         char name[ZCDN_MAX_NAME];
346
347         strscpy(name, skip_spaces(buf), sizeof(name));
348
349         rc = zcdn_destroy(strim(name));
350
351         return rc ? rc : count;
352 }
353
354 static const struct class_attribute class_attr_zcdn_destroy =
355         __ATTR(destroy, 0600, NULL, zcdn_destroy_store);
356
357 static void zcdn_device_release(struct device *dev)
358 {
359         struct zcdn_device *zcdndev = to_zcdn_dev(dev);
360
361         ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
362                         __func__, MAJOR(dev->devt), MINOR(dev->devt));
363
364         kfree(zcdndev);
365 }
366
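/*
 * zcdn_create(): Allocate and register a new zcrypt device node with
 * the given name (or a generated ZCRYPT_NAME "_<minor>" default) on
 * the first unused minor number. Fails with -EEXIST if a node with
 * this name already exists and -ENOSPC if all minors are in use.
 */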
367 static int zcdn_create(const char *name)
368 {
369         dev_t devt;
370         int i, rc = 0;
371         char nodename[ZCDN_MAX_NAME];
372         struct zcdn_device *zcdndev;
373
374         if (mutex_lock_interruptible(&ap_perms_mutex))
375                 return -ERESTARTSYS;
376
377         /* check if device node with this name already exists */
378         if (name[0]) {
379                 zcdndev = find_zcdndev_by_name(name);
380                 if (zcdndev) {
381                         put_device(&zcdndev->device);
382                         rc = -EEXIST;
383                         goto unlockout;
384                 }
385         }
386
387         /* find an unused minor number */
388         for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
389                 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
390                 zcdndev = find_zcdndev_by_devt(devt);
391                 if (zcdndev)
392                         put_device(&zcdndev->device);
393                 else
394                         break;
395         }
396         if (i == ZCRYPT_MAX_MINOR_NODES) {
397                 rc = -ENOSPC;
398                 goto unlockout;
399         }
400
401         /* alloc and prepare a new zcdn device */
402         zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
403         if (!zcdndev) {
404                 rc = -ENOMEM;
405                 goto unlockout;
406         }
407         zcdndev->device.release = zcdn_device_release;
408         zcdndev->device.class = zcrypt_class;
409         zcdndev->device.devt = devt;
410         zcdndev->device.groups = zcdn_dev_attr_groups;
411         if (name[0])
412                 strncpy(nodename, name, sizeof(nodename));
413         else
414                 snprintf(nodename, sizeof(nodename),
415                          ZCRYPT_NAME "_%d", (int)MINOR(devt));
416         nodename[sizeof(nodename) - 1] = '\0';
417         if (dev_set_name(&zcdndev->device, nodename)) {
418                 rc = -EINVAL;
419                 goto unlockout;
420         }
421         rc = device_register(&zcdndev->device);
422         if (rc) {
423                 put_device(&zcdndev->device);
424                 goto unlockout;
425         }
426
427         ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
428                         __func__, MAJOR(devt), MINOR(devt));
429
430 unlockout:
431         mutex_unlock(&ap_perms_mutex);
432         return rc;
433 }
434
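/*
 * zcdn_destroy(): Look up the zcrypt device node by name and
 * unregister it; the device struct itself is freed later via
 * zcdn_device_release() when the last reference is dropped.
 */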
435 static int zcdn_destroy(const char *name)
436 {
437         int rc = 0;
438         struct zcdn_device *zcdndev;
439
440         if (mutex_lock_interruptible(&ap_perms_mutex))
441                 return -ERESTARTSYS;
442
443         /* try to find this zcdn device */
444         zcdndev = find_zcdndev_by_name(name);
445         if (!zcdndev) {
446                 rc = -ENOENT;
447                 goto unlockout;
448         }
449
450         /*
451          * The zcdn device is not hard destroyed. It is subject to
452          * reference counting and thus just needs to be unregistered.
453          */
454         put_device(&zcdndev->device);
455         device_unregister(&zcdndev->device);
456
457 unlockout:
458         mutex_unlock(&ap_perms_mutex);
459         return rc;
460 }
461
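/*
 * zcdn_destroy_all(): Walk all possible minor numbers and unregister
 * every zcrypt device node that still exists.
 */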
462 static void zcdn_destroy_all(void)
463 {
464         int i;
465         dev_t devt;
466         struct zcdn_device *zcdndev;
467
468         mutex_lock(&ap_perms_mutex);
469         for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
470                 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
471                 zcdndev = find_zcdndev_by_devt(devt);
472                 if (zcdndev) {
473                         put_device(&zcdndev->device);
474                         device_unregister(&zcdndev->device);
475                 }
476         }
477         mutex_unlock(&ap_perms_mutex);
478 }
479
480 #endif
481
482 /*
483  * zcrypt_read(): Not supported.
484  *
485  * Reading from the zcrypt device is not supported beyond zcrypt 1.3.1.
486  */
487 static ssize_t zcrypt_read(struct file *filp, char __user *buf,
488                            size_t count, loff_t *f_pos)
489 {
490         return -EPERM;
491 }
492
493 /*
494  * zcrypt_write(): Not allowed.
495  *
496  * Writing to the zcrypt device is not allowed.
497  */
498 static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
499                             size_t count, loff_t *f_pos)
500 {
501         return -EPERM;
502 }
503
504 /*
505  * zcrypt_open(): Count number of users.
506  *
507  * Device open function to count number of users.
508  */
509 static int zcrypt_open(struct inode *inode, struct file *filp)
510 {
511         struct ap_perms *perms = &ap_perms;
512
513 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
514         if (filp->f_inode->i_cdev == &zcrypt_cdev) {
515                 struct zcdn_device *zcdndev;
516
517                 if (mutex_lock_interruptible(&ap_perms_mutex))
518                         return -ERESTARTSYS;
519                 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
520                 /* find returns a reference, no get_device() needed */
521                 mutex_unlock(&ap_perms_mutex);
522                 if (zcdndev)
523                         perms = &zcdndev->perms;
524         }
525 #endif
526         filp->private_data = (void *)perms;
527
528         atomic_inc(&zcrypt_open_count);
529         return stream_open(inode, filp);
530 }
531
532 /*
533  * zcrypt_release(): Count number of users.
534  *
535  * Device close function to count number of users.
536  */
537 static int zcrypt_release(struct inode *inode, struct file *filp)
538 {
539 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
540         if (filp->f_inode->i_cdev == &zcrypt_cdev) {
541                 struct zcdn_device *zcdndev;
542
543                 mutex_lock(&ap_perms_mutex);
544                 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
545                 mutex_unlock(&ap_perms_mutex);
546                 if (zcdndev) {
547                         /* 2 puts here: one for find, one for open */
548                         put_device(&zcdndev->device);
549                         put_device(&zcdndev->device);
550                 }
551         }
552 #endif
553
554         atomic_dec(&zcrypt_open_count);
555         return 0;
556 }
557
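/*
 * Permission helpers: check whether the given ap_perms allow the
 * requested ioctl number, card id or queue (domain) index.
 */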
558 static inline int zcrypt_check_ioctl(struct ap_perms *perms,
559                                      unsigned int cmd)
560 {
561         int rc = -EPERM;
562         int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
563
564         if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
565                 if (test_bit_inv(ioctlnr, perms->ioctlm))
566                         rc = 0;
567         }
568
569         if (rc)
570                 ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
571                                 __func__, ioctlnr, rc);
572
573         return rc;
574 }
575
576 static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
577 {
578         return test_bit_inv(card, perms->apm) ? true : false;
579 }
580
581 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
582 {
583         return test_bit_inv(queue, perms->aqm) ? true : false;
584 }
585
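/*
 * zcrypt_pick_queue() / zcrypt_drop_queue(): Acquire and release the
 * module, queue and device references plus the card/queue load
 * counters for the duration of a request on the selected queue.
 */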
586 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
587                                                      struct zcrypt_queue *zq,
588                                                      struct module **pmod,
589                                                      unsigned int weight)
590 {
591         if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
592                 return NULL;
593         zcrypt_queue_get(zq);
594         get_device(&zq->queue->ap_dev.device);
595         atomic_add(weight, &zc->load);
596         atomic_add(weight, &zq->load);
597         zq->request_count++;
598         *pmod = zq->queue->ap_dev.device.driver->owner;
599         return zq;
600 }
601
602 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
603                                      struct zcrypt_queue *zq,
604                                      struct module *mod,
605                                      unsigned int weight)
606 {
607         zq->request_count--;
608         atomic_sub(weight, &zc->load);
609         atomic_sub(weight, &zq->load);
610         put_device(&zq->queue->ap_dev.device);
611         zcrypt_queue_put(zq);
612         module_put(mod);
613 }
614
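/*
 * zcrypt_card_compare() / zcrypt_queue_compare(): Return true if the
 * given card/queue is a better choice than the current preferred one,
 * comparing the weighted load and, on a tie, the total request count.
 */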
615 static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
616                                        struct zcrypt_card *pref_zc,
617                                        unsigned int weight,
618                                        unsigned int pref_weight)
619 {
620         if (!pref_zc)
621                 return true;
622         weight += atomic_read(&zc->load);
623         pref_weight += atomic_read(&pref_zc->load);
624         if (weight == pref_weight)
625                 return atomic64_read(&zc->card->total_request_count) <
626                         atomic64_read(&pref_zc->card->total_request_count);
627         return weight < pref_weight;
628 }
629
630 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
631                                         struct zcrypt_queue *pref_zq,
632                                         unsigned int weight,
633                                         unsigned int pref_weight)
634 {
635         if (!pref_zq)
636                 return true;
637         weight += atomic_read(&zq->load);
638         pref_weight += atomic_read(&pref_zq->load);
639         if (weight == pref_weight)
640                 return zq->queue->total_request_count <
641                         pref_zq->queue->total_request_count;
642         return weight < pref_weight;
643 }
644
645 /*
646  * zcrypt ioctls.
647  */
648 static long zcrypt_rsa_modexpo(struct ap_perms *perms,
649                                struct zcrypt_track *tr,
650                                struct ica_rsa_modexpo *mex)
651 {
652         struct zcrypt_card *zc, *pref_zc;
653         struct zcrypt_queue *zq, *pref_zq;
654         struct ap_message ap_msg;
655         unsigned int wgt = 0, pref_wgt = 0;
656         unsigned int func_code;
657         int cpen, qpen, qid = 0, rc = -ENODEV;
658         struct module *mod;
659
660         trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
661
662         ap_init_message(&ap_msg);
663
664 #ifdef CONFIG_ZCRYPT_DEBUG
665         if (tr && tr->fi.cmd)
666                 ap_msg.fi.cmd = tr->fi.cmd;
667 #endif
668
669         if (mex->outputdatalength < mex->inputdatalength) {
670                 func_code = 0;
671                 rc = -EINVAL;
672                 goto out;
673         }
674
675         /*
676          * As long as outputdatalength is big enough, we can set the
677          * outputdatalength equal to the inputdatalength, since that is the
678          * number of bytes we will copy in any case
679          */
680         mex->outputdatalength = mex->inputdatalength;
681
682         rc = get_rsa_modex_fc(mex, &func_code);
683         if (rc)
684                 goto out;
685
686         pref_zc = NULL;
687         pref_zq = NULL;
688         spin_lock(&zcrypt_list_lock);
689         for_each_zcrypt_card(zc) {
690                 /* Check for usable accelerator or CCA card */
691                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
692                     !(zc->card->functions & 0x18000000))
693                         continue;
694                 /* Check for size limits */
695                 if (zc->min_mod_size > mex->inputdatalength ||
696                     zc->max_mod_size < mex->inputdatalength)
697                         continue;
698                 /* check if device node has admission for this card */
699                 if (!zcrypt_check_card(perms, zc->card->id))
700                         continue;
701                 /* get weight index of the card device  */
702                 wgt = zc->speed_rating[func_code];
703                 /* penalty if this msg was previously sent via this card */
704                 cpen = (tr && tr->again_counter && tr->last_qid &&
705                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
706                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
707                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
708                         continue;
709                 for_each_zcrypt_queue(zq, zc) {
710                         /* check if device is usable and eligible */
711                         if (!zq->online || !zq->ops->rsa_modexpo ||
712                             !zq->queue->config || zq->queue->chkstop)
713                                 continue;
714                         /* check if device node has admission for this queue */
715                         if (!zcrypt_check_queue(perms,
716                                                 AP_QID_QUEUE(zq->queue->qid)))
717                                 continue;
718                         /* penalty if the msg was previously sent at this qid */
719                         qpen = (tr && tr->again_counter && tr->last_qid &&
720                                 tr->last_qid == zq->queue->qid) ?
721                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
722                         if (!zcrypt_queue_compare(zq, pref_zq,
723                                                   wgt + cpen + qpen, pref_wgt))
724                                 continue;
725                         pref_zc = zc;
726                         pref_zq = zq;
727                         pref_wgt = wgt + cpen + qpen;
728                 }
729         }
730         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
731         spin_unlock(&zcrypt_list_lock);
732
733         if (!pref_zq) {
734                 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
735                                __func__);
736                 rc = -ENODEV;
737                 goto out;
738         }
739
740         qid = pref_zq->queue->qid;
741         rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
742
743         spin_lock(&zcrypt_list_lock);
744         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
745         spin_unlock(&zcrypt_list_lock);
746
747 out:
748         ap_release_message(&ap_msg);
749         if (tr) {
750                 tr->last_rc = rc;
751                 tr->last_qid = qid;
752         }
753         trace_s390_zcrypt_rep(mex, func_code, rc,
754                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
755         return rc;
756 }
757
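/*
 * zcrypt_rsa_crt(): Same device selection logic as zcrypt_rsa_modexpo()
 * above, but for RSA operations in CRT format.
 */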
758 static long zcrypt_rsa_crt(struct ap_perms *perms,
759                            struct zcrypt_track *tr,
760                            struct ica_rsa_modexpo_crt *crt)
761 {
762         struct zcrypt_card *zc, *pref_zc;
763         struct zcrypt_queue *zq, *pref_zq;
764         struct ap_message ap_msg;
765         unsigned int wgt = 0, pref_wgt = 0;
766         unsigned int func_code;
767         int cpen, qpen, qid = 0, rc = -ENODEV;
768         struct module *mod;
769
770         trace_s390_zcrypt_req(crt, TP_ICARSACRT);
771
772         ap_init_message(&ap_msg);
773
774 #ifdef CONFIG_ZCRYPT_DEBUG
775         if (tr && tr->fi.cmd)
776                 ap_msg.fi.cmd = tr->fi.cmd;
777 #endif
778
779         if (crt->outputdatalength < crt->inputdatalength) {
780                 func_code = 0;
781                 rc = -EINVAL;
782                 goto out;
783         }
784
785         /*
786          * As long as outputdatalength is big enough, we can set the
787          * outputdatalength equal to the inputdatalength, since that is the
788          * number of bytes we will copy in any case
789          */
790         crt->outputdatalength = crt->inputdatalength;
791
792         rc = get_rsa_crt_fc(crt, &func_code);
793         if (rc)
794                 goto out;
795
796         pref_zc = NULL;
797         pref_zq = NULL;
798         spin_lock(&zcrypt_list_lock);
799         for_each_zcrypt_card(zc) {
800                 /* Check for usable accelerator or CCA card */
801                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
802                     !(zc->card->functions & 0x18000000))
803                         continue;
804                 /* Check for size limits */
805                 if (zc->min_mod_size > crt->inputdatalength ||
806                     zc->max_mod_size < crt->inputdatalength)
807                         continue;
808                 /* check if device node has admission for this card */
809                 if (!zcrypt_check_card(perms, zc->card->id))
810                         continue;
811                 /* get weight index of the card device  */
812                 wgt = zc->speed_rating[func_code];
813                 /* penalty if this msg was previously sent via this card */
814                 cpen = (tr && tr->again_counter && tr->last_qid &&
815                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
816                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
817                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
818                         continue;
819                 for_each_zcrypt_queue(zq, zc) {
820                         /* check if device is usable and eligible */
821                         if (!zq->online || !zq->ops->rsa_modexpo_crt ||
822                             !zq->queue->config || zq->queue->chkstop)
823                                 continue;
824                         /* check if device node has admission for this queue */
825                         if (!zcrypt_check_queue(perms,
826                                                 AP_QID_QUEUE(zq->queue->qid)))
827                                 continue;
828                         /* penalty if the msg was previously sent at this qid */
829                         qpen = (tr && tr->again_counter && tr->last_qid &&
830                                 tr->last_qid == zq->queue->qid) ?
831                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
832                         if (!zcrypt_queue_compare(zq, pref_zq,
833                                                   wgt + cpen + qpen, pref_wgt))
834                                 continue;
835                         pref_zc = zc;
836                         pref_zq = zq;
837                         pref_wgt = wgt + cpen + qpen;
838                 }
839         }
840         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
841         spin_unlock(&zcrypt_list_lock);
842
843         if (!pref_zq) {
844                 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
845                                __func__);
846                 rc = -ENODEV;
847                 goto out;
848         }
849
850         qid = pref_zq->queue->qid;
851         rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
852
853         spin_lock(&zcrypt_list_lock);
854         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
855         spin_unlock(&zcrypt_list_lock);
856
857 out:
858         ap_release_message(&ap_msg);
859         if (tr) {
860                 tr->last_rc = rc;
861                 tr->last_qid = qid;
862         }
863         trace_s390_zcrypt_rep(crt, func_code, rc,
864                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
865         return rc;
866 }
867
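/*
 * _zcrypt_send_cprb(): Forward a CCA CPRB to a usable CCA card/queue,
 * honoring the card/domain addressing in the xcRB, the device node
 * permissions and the card's max message size, using the same weight
 * based selection as the RSA paths above.
 */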
868 static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
869                               struct zcrypt_track *tr,
870                               struct ica_xcRB *xcrb)
871 {
872         struct zcrypt_card *zc, *pref_zc;
873         struct zcrypt_queue *zq, *pref_zq;
874         struct ap_message ap_msg;
875         unsigned int wgt = 0, pref_wgt = 0;
876         unsigned int func_code;
877         unsigned short *domain, tdom;
878         int cpen, qpen, qid = 0, rc = -ENODEV;
879         struct module *mod;
880
881         trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);
882
883         xcrb->status = 0;
884         ap_init_message(&ap_msg);
885
886 #ifdef CONFIG_ZCRYPT_DEBUG
887         if (tr && tr->fi.cmd)
888                 ap_msg.fi.cmd = tr->fi.cmd;
889         if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
890                 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
891                                 __func__, tr->fi.cmd);
892                 xcrb->agent_ID = 0x4646;
893         }
894 #endif
895
896         rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
897         if (rc)
898                 goto out;
899
900         tdom = *domain;
901         if (perms != &ap_perms && tdom < AP_DOMAINS) {
902                 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
903                         if (!test_bit_inv(tdom, perms->adm)) {
904                                 rc = -ENODEV;
905                                 goto out;
906                         }
907                 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
908                         rc = -EOPNOTSUPP;
909                         goto out;
910                 }
911         }
912         /*
913          * If a valid target domain is set and this domain is NOT a usage
914          * domain but a control only domain, autoselect target domain.
915          */
916         if (tdom < AP_DOMAINS &&
917             !ap_test_config_usage_domain(tdom) &&
918             ap_test_config_ctrl_domain(tdom))
919                 tdom = AUTOSEL_DOM;
920
921         pref_zc = NULL;
922         pref_zq = NULL;
923         spin_lock(&zcrypt_list_lock);
924         for_each_zcrypt_card(zc) {
925                 /* Check for usable CCA card */
926                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
927                     !(zc->card->functions & 0x10000000))
928                         continue;
929                 /* Check for user selected CCA card */
930                 if (xcrb->user_defined != AUTOSELECT &&
931                     xcrb->user_defined != zc->card->id)
932                         continue;
933                 /* check if request size exceeds card max msg size */
934                 if (ap_msg.len > zc->card->maxmsgsize)
935                         continue;
936                 /* check if device node has admission for this card */
937                 if (!zcrypt_check_card(perms, zc->card->id))
938                         continue;
939                 /* get weight index of the card device  */
940                 wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
941                 /* penalty if this msg was previously sent via this card */
942                 cpen = (tr && tr->again_counter && tr->last_qid &&
943                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
944                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
945                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
946                         continue;
947                 for_each_zcrypt_queue(zq, zc) {
948                         /* check for device usable and eligible */
949                         if (!zq->online || !zq->ops->send_cprb ||
950                             !zq->queue->config || zq->queue->chkstop ||
951                             (tdom != AUTOSEL_DOM &&
952                              tdom != AP_QID_QUEUE(zq->queue->qid)))
953                                 continue;
954                         /* check if device node has admission for this queue */
955                         if (!zcrypt_check_queue(perms,
956                                                 AP_QID_QUEUE(zq->queue->qid)))
957                                 continue;
958                         /* penalty if the msg was previously sent at this qid */
959                         qpen = (tr && tr->again_counter && tr->last_qid &&
960                                 tr->last_qid == zq->queue->qid) ?
961                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
962                         if (!zcrypt_queue_compare(zq, pref_zq,
963                                                   wgt + cpen + qpen, pref_wgt))
964                                 continue;
965                         pref_zc = zc;
966                         pref_zq = zq;
967                         pref_wgt = wgt + cpen + qpen;
968                 }
969         }
970         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
971         spin_unlock(&zcrypt_list_lock);
972
973         if (!pref_zq) {
974                 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
975                                __func__, xcrb->user_defined, *domain);
976                 rc = -ENODEV;
977                 goto out;
978         }
979
980         /* in case of auto select, provide the correct domain */
981         qid = pref_zq->queue->qid;
982         if (*domain == AUTOSEL_DOM)
983                 *domain = AP_QID_QUEUE(qid);
984
985 #ifdef CONFIG_ZCRYPT_DEBUG
986         if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) {
987                 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n",
988                                 __func__, tr->fi.cmd);
989                 *domain = 99;
990         }
991 #endif
992
993         rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
994
995         spin_lock(&zcrypt_list_lock);
996         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
997         spin_unlock(&zcrypt_list_lock);
998
999 out:
1000         ap_release_message(&ap_msg);
1001         if (tr) {
1002                 tr->last_rc = rc;
1003                 tr->last_qid = qid;
1004         }
1005         trace_s390_zcrypt_rep(xcrb, func_code, rc,
1006                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1007         return rc;
1008 }
1009
1010 long zcrypt_send_cprb(struct ica_xcRB *xcrb)
1011 {
1012         return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb);
1013 }
1014 EXPORT_SYMBOL(zcrypt_send_cprb);
1015
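/*
 * Helpers to check whether a card id or queue qid matches one of the
 * entries in an EP11 target device list (including the autoselect
 * wildcards AUTOSEL_AP / AUTOSEL_DOM).
 */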
1016 static bool is_desired_ep11_card(unsigned int dev_id,
1017                                  unsigned short target_num,
1018                                  struct ep11_target_dev *targets)
1019 {
1020         while (target_num-- > 0) {
1021                 if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
1022                         return true;
1023                 targets++;
1024         }
1025         return false;
1026 }
1027
1028 static bool is_desired_ep11_queue(unsigned int dev_qid,
1029                                   unsigned short target_num,
1030                                   struct ep11_target_dev *targets)
1031 {
1032         int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
1033
1034         while (target_num-- > 0) {
1035                 if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
1036                     (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
1037                         return true;
1038                 targets++;
1039         }
1040         return false;
1041 }
1042
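/*
 * _zcrypt_send_ep11_cprb(): Forward an EP11 CPRB to a usable EP11
 * card/queue matching the (optional) target list of the ep11_urb;
 * an empty target list means autoselect among all available targets.
 */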
1043 static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
1044                                    struct zcrypt_track *tr,
1045                                    struct ep11_urb *xcrb)
1046 {
1047         struct zcrypt_card *zc, *pref_zc;
1048         struct zcrypt_queue *zq, *pref_zq;
1049         struct ep11_target_dev *targets;
1050         unsigned short target_num;
1051         unsigned int wgt = 0, pref_wgt = 0;
1052         unsigned int func_code, domain;
1053         struct ap_message ap_msg;
1054         int cpen, qpen, qid = 0, rc = -ENODEV;
1055         struct module *mod;
1056
1057         trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
1058
1059         ap_init_message(&ap_msg);
1060
1061 #ifdef CONFIG_ZCRYPT_DEBUG
1062         if (tr && tr->fi.cmd)
1063                 ap_msg.fi.cmd = tr->fi.cmd;
1064 #endif
1065
1066         target_num = (unsigned short)xcrb->targets_num;
1067
1068         /* empty list indicates autoselect (all available targets) */
1069         targets = NULL;
1070         if (target_num != 0) {
1071                 struct ep11_target_dev __user *uptr;
1072
1073                 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
1074                 if (!targets) {
1075                         func_code = 0;
1076                         rc = -ENOMEM;
1077                         goto out;
1078                 }
1079
1080                 uptr = (struct ep11_target_dev __force __user *)xcrb->targets;
1081                 if (z_copy_from_user(userspace, targets, uptr,
1082                                      target_num * sizeof(*targets))) {
1083                         func_code = 0;
1084                         rc = -EFAULT;
1085                         goto out_free;
1086                 }
1087         }
1088
1089         rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
1090         if (rc)
1091                 goto out_free;
1092
1093         if (perms != &ap_perms && domain < AUTOSEL_DOM) {
1094                 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
1095                         if (!test_bit_inv(domain, perms->adm)) {
1096                                 rc = -ENODEV;
1097                                 goto out_free;
1098                         }
1099                 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
1100                         rc = -EOPNOTSUPP;
1101                         goto out_free;
1102                 }
1103         }
1104
1105         pref_zc = NULL;
1106         pref_zq = NULL;
1107         spin_lock(&zcrypt_list_lock);
1108         for_each_zcrypt_card(zc) {
1109                 /* Check for usable EP11 card */
1110                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
1111                     !(zc->card->functions & 0x04000000))
1112                         continue;
1113                 /* Check for user selected EP11 card */
1114                 if (targets &&
1115                     !is_desired_ep11_card(zc->card->id, target_num, targets))
1116                         continue;
1117                 /* check if request size exceeds card max msg size */
1118                 if (ap_msg.len > zc->card->maxmsgsize)
1119                         continue;
1120                 /* check if device node has admission for this card */
1121                 if (!zcrypt_check_card(perms, zc->card->id))
1122                         continue;
1123                 /* get weight index of the card device  */
1124                 wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
1125                 /* penalty if this msg was previously sent via this card */
1126                 cpen = (tr && tr->again_counter && tr->last_qid &&
1127                         AP_QID_CARD(tr->last_qid) == zc->card->id) ?
1128                         TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
1129                 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
1130                         continue;
1131                 for_each_zcrypt_queue(zq, zc) {
1132                         /* check if device is usable and eligible */
1133                         if (!zq->online || !zq->ops->send_ep11_cprb ||
1134                             !zq->queue->config || zq->queue->chkstop ||
1135                             (targets &&
1136                              !is_desired_ep11_queue(zq->queue->qid,
1137                                                     target_num, targets)))
1138                                 continue;
1139                         /* check if device node has admission for this queue */
1140                         if (!zcrypt_check_queue(perms,
1141                                                 AP_QID_QUEUE(zq->queue->qid)))
1142                                 continue;
1143                         /* penalty if the msg was previously sent at this qid */
1144                         qpen = (tr && tr->again_counter && tr->last_qid &&
1145                                 tr->last_qid == zq->queue->qid) ?
1146                                 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
1147                         if (!zcrypt_queue_compare(zq, pref_zq,
1148                                                   wgt + cpen + qpen, pref_wgt))
1149                                 continue;
1150                         pref_zc = zc;
1151                         pref_zq = zq;
1152                         pref_wgt = wgt + cpen + qpen;
1153                 }
1154         }
1155         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1156         spin_unlock(&zcrypt_list_lock);
1157
1158         if (!pref_zq) {
1159                 if (targets && target_num == 1) {
1160                         ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
1161                                        __func__, (int)targets->ap_id,
1162                                        (int)targets->dom_id);
1163                 } else if (targets) {
1164                         ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n",
1165                                        __func__, (int)target_num);
1166                 } else {
1167                         ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n",
1168                                        __func__);
1169                 }
1170                 rc = -ENODEV;
1171                 goto out_free;
1172         }
1173
1174         qid = pref_zq->queue->qid;
1175         rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
1176
1177         spin_lock(&zcrypt_list_lock);
1178         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1179         spin_unlock(&zcrypt_list_lock);
1180
1181 out_free:
1182         kfree(targets);
1183 out:
1184         ap_release_message(&ap_msg);
1185         if (tr) {
1186                 tr->last_rc = rc;
1187                 tr->last_qid = qid;
1188         }
1189         trace_s390_zcrypt_rep(xcrb, func_code, rc,
1190                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1191         return rc;
1192 }
1193
1194 long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
1195 {
1196         return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
1197 }
1198 EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
1199
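/*
 * zcrypt_rng(): Obtain random bytes from a usable CCA card via the
 * queue's rng operation, picking the least loaded card/queue.
 */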
1200 static long zcrypt_rng(char *buffer)
1201 {
1202         struct zcrypt_card *zc, *pref_zc;
1203         struct zcrypt_queue *zq, *pref_zq;
1204         unsigned int wgt = 0, pref_wgt = 0;
1205         unsigned int func_code;
1206         struct ap_message ap_msg;
1207         unsigned int domain;
1208         int qid = 0, rc = -ENODEV;
1209         struct module *mod;
1210
1211         trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
1212
1213         ap_init_message(&ap_msg);
1214         rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
1215         if (rc)
1216                 goto out;
1217
1218         pref_zc = NULL;
1219         pref_zq = NULL;
1220         spin_lock(&zcrypt_list_lock);
1221         for_each_zcrypt_card(zc) {
1222                 /* Check for usable CCA card */
1223                 if (!zc->online || !zc->card->config || zc->card->chkstop ||
1224                     !(zc->card->functions & 0x10000000))
1225                         continue;
1226                 /* get weight index of the card device  */
1227                 wgt = zc->speed_rating[func_code];
1228                 if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
1229                         continue;
1230                 for_each_zcrypt_queue(zq, zc) {
1231                         /* check if device is usable and eligible */
1232                         if (!zq->online || !zq->ops->rng ||
1233                             !zq->queue->config || zq->queue->chkstop)
1234                                 continue;
1235                         if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
1236                                 continue;
1237                         pref_zc = zc;
1238                         pref_zq = zq;
1239                         pref_wgt = wgt;
1240                 }
1241         }
1242         pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1243         spin_unlock(&zcrypt_list_lock);
1244
1245         if (!pref_zq) {
1246                 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
1247                                __func__);
1248                 rc = -ENODEV;
1249                 goto out;
1250         }
1251
1252         qid = pref_zq->queue->qid;
1253         rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
1254
1255         spin_lock(&zcrypt_list_lock);
1256         zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1257         spin_unlock(&zcrypt_list_lock);
1258
1259 out:
1260         ap_release_message(&ap_msg);
1261         trace_s390_zcrypt_rep(buffer, func_code, rc,
1262                               AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1263         return rc;
1264 }
1265
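/*
 * Status helpers: collect per device status, online state, queue
 * depths and request counts from the registered cards and queues.
 */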
1266 static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
1267 {
1268         struct zcrypt_card *zc;
1269         struct zcrypt_queue *zq;
1270         struct zcrypt_device_status *stat;
1271         int card, queue;
1272
1273         memset(devstatus, 0, MAX_ZDEV_ENTRIES
1274                * sizeof(struct zcrypt_device_status));
1275
1276         spin_lock(&zcrypt_list_lock);
1277         for_each_zcrypt_card(zc) {
1278                 for_each_zcrypt_queue(zq, zc) {
1279                         card = AP_QID_CARD(zq->queue->qid);
1280                         if (card >= MAX_ZDEV_CARDIDS)
1281                                 continue;
1282                         queue = AP_QID_QUEUE(zq->queue->qid);
1283                         stat = &devstatus[card * AP_DOMAINS + queue];
1284                         stat->hwtype = zc->card->ap_dev.device_type;
1285                         stat->functions = zc->card->functions >> 26;
1286                         stat->qid = zq->queue->qid;
1287                         stat->online = zq->online ? 0x01 : 0x00;
1288                 }
1289         }
1290         spin_unlock(&zcrypt_list_lock);
1291 }
1292
1293 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
1294 {
1295         struct zcrypt_card *zc;
1296         struct zcrypt_queue *zq;
1297         struct zcrypt_device_status_ext *stat;
1298         int card, queue;
1299
1300         memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
1301                * sizeof(struct zcrypt_device_status_ext));
1302
1303         spin_lock(&zcrypt_list_lock);
1304         for_each_zcrypt_card(zc) {
1305                 for_each_zcrypt_queue(zq, zc) {
1306                         card = AP_QID_CARD(zq->queue->qid);
1307                         queue = AP_QID_QUEUE(zq->queue->qid);
1308                         stat = &devstatus[card * AP_DOMAINS + queue];
1309                         stat->hwtype = zc->card->ap_dev.device_type;
1310                         stat->functions = zc->card->functions >> 26;
1311                         stat->qid = zq->queue->qid;
1312                         stat->online = zq->online ? 0x01 : 0x00;
1313                 }
1314         }
1315         spin_unlock(&zcrypt_list_lock);
1316 }
1317 EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
1318
1319 int zcrypt_device_status_ext(int card, int queue,
1320                              struct zcrypt_device_status_ext *devstat)
1321 {
1322         struct zcrypt_card *zc;
1323         struct zcrypt_queue *zq;
1324
1325         memset(devstat, 0, sizeof(*devstat));
1326
1327         spin_lock(&zcrypt_list_lock);
1328         for_each_zcrypt_card(zc) {
1329                 for_each_zcrypt_queue(zq, zc) {
1330                         if (card == AP_QID_CARD(zq->queue->qid) &&
1331                             queue == AP_QID_QUEUE(zq->queue->qid)) {
1332                                 devstat->hwtype = zc->card->ap_dev.device_type;
1333                                 devstat->functions = zc->card->functions >> 26;
1334                                 devstat->qid = zq->queue->qid;
1335                                 devstat->online = zq->online ? 0x01 : 0x00;
1336                                 spin_unlock(&zcrypt_list_lock);
1337                                 return 0;
1338                         }
1339                 }
1340         }
1341         spin_unlock(&zcrypt_list_lock);
1342
1343         return -ENODEV;
1344 }
1345 EXPORT_SYMBOL(zcrypt_device_status_ext);
1346
1347 static void zcrypt_status_mask(char status[], size_t max_adapters)
1348 {
1349         struct zcrypt_card *zc;
1350         struct zcrypt_queue *zq;
1351         int card;
1352
1353         memset(status, 0, max_adapters);
1354         spin_lock(&zcrypt_list_lock);
1355         for_each_zcrypt_card(zc) {
1356                 for_each_zcrypt_queue(zq, zc) {
1357                         card = AP_QID_CARD(zq->queue->qid);
1358                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1359                             card >= max_adapters)
1360                                 continue;
1361                         status[card] = zc->online ? zc->user_space_type : 0x0d;
1362                 }
1363         }
1364         spin_unlock(&zcrypt_list_lock);
1365 }
1366
1367 static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
1368 {
1369         struct zcrypt_card *zc;
1370         struct zcrypt_queue *zq;
1371         int card;
1372
1373         memset(qdepth, 0, max_adapters);
1374         spin_lock(&zcrypt_list_lock);
1375         local_bh_disable();
1376         for_each_zcrypt_card(zc) {
1377                 for_each_zcrypt_queue(zq, zc) {
1378                         card = AP_QID_CARD(zq->queue->qid);
1379                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1380                             card >= max_adapters)
1381                                 continue;
1382                         spin_lock(&zq->queue->lock);
1383                         qdepth[card] =
1384                                 zq->queue->pendingq_count +
1385                                 zq->queue->requestq_count;
1386                         spin_unlock(&zq->queue->lock);
1387                 }
1388         }
1389         local_bh_enable();
1390         spin_unlock(&zcrypt_list_lock);
1391 }
1392
1393 static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
1394 {
1395         struct zcrypt_card *zc;
1396         struct zcrypt_queue *zq;
1397         int card;
1398         u64 cnt;
1399
1400         memset(reqcnt, 0, sizeof(u32) * max_adapters);
1401         spin_lock(&zcrypt_list_lock);
1402         local_bh_disable();
1403         for_each_zcrypt_card(zc) {
1404                 for_each_zcrypt_queue(zq, zc) {
1405                         card = AP_QID_CARD(zq->queue->qid);
1406                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1407                             card >= max_adapters)
1408                                 continue;
1409                         spin_lock(&zq->queue->lock);
1410                         cnt = zq->queue->total_request_count;
1411                         spin_unlock(&zq->queue->lock);
1412                         reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
1413                 }
1414         }
1415         local_bh_enable();
1416         spin_unlock(&zcrypt_list_lock);
1417 }
1418
1419 static int zcrypt_pendingq_count(void)
1420 {
1421         struct zcrypt_card *zc;
1422         struct zcrypt_queue *zq;
1423         int pendingq_count;
1424
1425         pendingq_count = 0;
1426         spin_lock(&zcrypt_list_lock);
1427         local_bh_disable();
1428         for_each_zcrypt_card(zc) {
1429                 for_each_zcrypt_queue(zq, zc) {
1430                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1431                                 continue;
1432                         spin_lock(&zq->queue->lock);
1433                         pendingq_count += zq->queue->pendingq_count;
1434                         spin_unlock(&zq->queue->lock);
1435                 }
1436         }
1437         local_bh_enable();
1438         spin_unlock(&zcrypt_list_lock);
1439         return pendingq_count;
1440 }
1441
1442 static int zcrypt_requestq_count(void)
1443 {
1444         struct zcrypt_card *zc;
1445         struct zcrypt_queue *zq;
1446         int requestq_count;
1447
1448         requestq_count = 0;
1449         spin_lock(&zcrypt_list_lock);
1450         local_bh_disable();
1451         for_each_zcrypt_card(zc) {
1452                 for_each_zcrypt_queue(zq, zc) {
1453                         if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1454                                 continue;
1455                         spin_lock(&zq->queue->lock);
1456                         requestq_count += zq->queue->requestq_count;
1457                         spin_unlock(&zq->queue->lock);
1458                 }
1459         }
1460         local_bh_enable();
1461         spin_unlock(&zcrypt_list_lock);
1462         return requestq_count;
1463 }
1464
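/*
 * icarsamodexpo_ioctl() / icarsacrt_ioctl(): Copy the request from
 * userspace, retry on -EAGAIN up to TRACK_AGAIN_MAX times (plus one
 * more retry round after a triggered bus rescan on -ENODEV) and copy
 * the resulting output length back to userspace on success.
 */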
1465 static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
1466 {
1467         int rc;
1468         struct zcrypt_track tr;
1469         struct ica_rsa_modexpo mex;
1470         struct ica_rsa_modexpo __user *umex = (void __user *)arg;
1471
1472         memset(&tr, 0, sizeof(tr));
1473         if (copy_from_user(&mex, umex, sizeof(mex)))
1474                 return -EFAULT;
1475
1476 #ifdef CONFIG_ZCRYPT_DEBUG
1477         if (mex.inputdatalength & (1U << 31)) {
1478                 if (!capable(CAP_SYS_ADMIN))
1479                         return -EPERM;
1480                 tr.fi.cmd = (u16)(mex.inputdatalength >> 16);
1481         }
1482         mex.inputdatalength &= 0x0000FFFF;
1483 #endif
1484
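        /*
         * Send the request; transparently retry while the lower layers
         * report -EAGAIN, but give up after TRACK_AGAIN_MAX attempts.
         */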
1485         do {
1486                 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1487                 if (rc == -EAGAIN)
1488                         tr.again_counter++;
1489 #ifdef CONFIG_ZCRYPT_DEBUG
1490                 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1491                         break;
1492 #endif
1493         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1494         /* on failure: retry once again after a requested rescan */
1495         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1496                 do {
1497                         rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1498                         if (rc == -EAGAIN)
1499                                 tr.again_counter++;
1500                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1501         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1502                 rc = -EIO;
1503         if (rc) {
1504                 ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc);
1505                 return rc;
1506         }
1507         return put_user(mex.outputdatalength, &umex->outputdatalength);
1508 }
1509
1510 static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
1511 {
1512         int rc;
1513         struct zcrypt_track tr;
1514         struct ica_rsa_modexpo_crt crt;
1515         struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;
1516
1517         memset(&tr, 0, sizeof(tr));
1518         if (copy_from_user(&crt, ucrt, sizeof(crt)))
1519                 return -EFAULT;
1520
1521 #ifdef CONFIG_ZCRYPT_DEBUG
1522         if (crt.inputdatalength & (1U << 31)) {
1523                 if (!capable(CAP_SYS_ADMIN))
1524                         return -EPERM;
1525                 tr.fi.cmd = (u16)(crt.inputdatalength >> 16);
1526         }
1527         crt.inputdatalength &= 0x0000FFFF;
1528 #endif
1529
1530         do {
1531                 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1532                 if (rc == -EAGAIN)
1533                         tr.again_counter++;
1534 #ifdef CONFIG_ZCRYPT_DEBUG
1535                 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1536                         break;
1537 #endif
1538         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1539         /* on failure: retry once again after a requested rescan */
1540         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1541                 do {
1542                         rc = zcrypt_rsa_crt(perms, &tr, &crt);
1543                         if (rc == -EAGAIN)
1544                                 tr.again_counter++;
1545                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1546         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1547                 rc = -EIO;
1548         if (rc) {
1549                 ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc);
1550                 return rc;
1551         }
1552         return put_user(crt.outputdatalength, &ucrt->outputdatalength);
1553 }
1554
1555 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
1556 {
1557         int rc;
1558         struct ica_xcRB xcrb;
1559         struct zcrypt_track tr;
1560         struct ica_xcRB __user *uxcrb = (void __user *)arg;
1561
1562         memset(&tr, 0, sizeof(tr));
1563         if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1564                 return -EFAULT;
1565
1566 #ifdef CONFIG_ZCRYPT_DEBUG
1567         if ((xcrb.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) {
1568                 if (!capable(CAP_SYS_ADMIN))
1569                         return -EPERM;
1570                 tr.fi.cmd = (u16)(xcrb.status >> 16);
1571         }
1572         xcrb.status = 0;
1573 #endif
1574
1575         do {
1576                 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
1577                 if (rc == -EAGAIN)
1578                         tr.again_counter++;
1579 #ifdef CONFIG_ZCRYPT_DEBUG
1580                 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1581                         break;
1582 #endif
1583         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1584         /* on failure: retry once again after a requested rescan */
1585         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1586                 do {
1587                         rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
1588                         if (rc == -EAGAIN)
1589                                 tr.again_counter++;
1590                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1591         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1592                 rc = -EIO;
1593         if (rc)
1594                 ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n",
1595                                rc, xcrb.status);
1596         if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1597                 return -EFAULT;
1598         return rc;
1599 }
1600
1601 static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
1602 {
1603         int rc;
1604         struct ep11_urb xcrb;
1605         struct zcrypt_track tr;
1606         struct ep11_urb __user *uxcrb = (void __user *)arg;
1607
1608         memset(&tr, 0, sizeof(tr));
1609         if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1610                 return -EFAULT;
1611
1612 #ifdef CONFIG_ZCRYPT_DEBUG
1613         if (xcrb.req_len & (1ULL << 63)) {
1614                 if (!capable(CAP_SYS_ADMIN))
1615                         return -EPERM;
1616                 tr.fi.cmd = (u16)(xcrb.req_len >> 48);
1617         }
1618         xcrb.req_len &= 0x0000FFFFFFFFFFFFULL;
1619 #endif
1620
1621         do {
1622                 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1623                 if (rc == -EAGAIN)
1624                         tr.again_counter++;
1625 #ifdef CONFIG_ZCRYPT_DEBUG
1626                 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1627                         break;
1628 #endif
1629         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1630         /* on failure: retry once again after a requested rescan */
1631         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1632                 do {
1633                         rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1634                         if (rc == -EAGAIN)
1635                                 tr.again_counter++;
1636                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1637         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1638                 rc = -EIO;
1639         if (rc)
1640                 ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc);
1641         if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1642                 return -EFAULT;
1643         return rc;
1644 }
1645
1646 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
1647                                   unsigned long arg)
1648 {
1649         int rc;
1650         struct ap_perms *perms =
1651                 (struct ap_perms *)filp->private_data;
1652
1653         rc = zcrypt_check_ioctl(perms, cmd);
1654         if (rc)
1655                 return rc;
1656
1657         switch (cmd) {
1658         case ICARSAMODEXPO:
1659                 return icarsamodexpo_ioctl(perms, arg);
1660         case ICARSACRT:
1661                 return icarsacrt_ioctl(perms, arg);
1662         case ZSECSENDCPRB:
1663                 return zsecsendcprb_ioctl(perms, arg);
1664         case ZSENDEP11CPRB:
1665                 return zsendep11cprb_ioctl(perms, arg);
1666         case ZCRYPT_DEVICE_STATUS: {
1667                 struct zcrypt_device_status_ext *device_status;
1668                 size_t total_size = MAX_ZDEV_ENTRIES_EXT
1669                         * sizeof(struct zcrypt_device_status_ext);
1670
1671                 device_status = kzalloc(total_size, GFP_KERNEL);
1672                 if (!device_status)
1673                         return -ENOMEM;
1674                 zcrypt_device_status_mask_ext(device_status);
1675                 if (copy_to_user((char __user *)arg, device_status,
1676                                  total_size))
1677                         rc = -EFAULT;
1678                 kfree(device_status);
1679                 return rc;
1680         }
1681         case ZCRYPT_STATUS_MASK: {
1682                 char status[AP_DEVICES];
1683
1684                 zcrypt_status_mask(status, AP_DEVICES);
1685                 if (copy_to_user((char __user *)arg, status, sizeof(status)))
1686                         return -EFAULT;
1687                 return 0;
1688         }
1689         case ZCRYPT_QDEPTH_MASK: {
1690                 char qdepth[AP_DEVICES];
1691
1692                 zcrypt_qdepth_mask(qdepth, AP_DEVICES);
1693                 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
1694                         return -EFAULT;
1695                 return 0;
1696         }
1697         case ZCRYPT_PERDEV_REQCNT: {
1698                 u32 *reqcnt;
1699
1700                 reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
1701                 if (!reqcnt)
1702                         return -ENOMEM;
1703                 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
1704                 if (copy_to_user((int __user *)arg, reqcnt,
1705                                  sizeof(u32) * AP_DEVICES))
1706                         rc = -EFAULT;
1707                 kfree(reqcnt);
1708                 return rc;
1709         }
1710         case Z90STAT_REQUESTQ_COUNT:
1711                 return put_user(zcrypt_requestq_count(), (int __user *)arg);
1712         case Z90STAT_PENDINGQ_COUNT:
1713                 return put_user(zcrypt_pendingq_count(), (int __user *)arg);
1714         case Z90STAT_TOTALOPEN_COUNT:
1715                 return put_user(atomic_read(&zcrypt_open_count),
1716                                 (int __user *)arg);
1717         case Z90STAT_DOMAIN_INDEX:
1718                 return put_user(ap_domain_index, (int __user *)arg);
1719         /*
1720          * Deprecated ioctls
1721          */
1722         case ZDEVICESTATUS: {
1723                 /* the old ioctl supports only 64 adapters */
1724                 struct zcrypt_device_status *device_status;
1725                 size_t total_size = MAX_ZDEV_ENTRIES
1726                         * sizeof(struct zcrypt_device_status);
1727
1728                 device_status = kzalloc(total_size, GFP_KERNEL);
1729                 if (!device_status)
1730                         return -ENOMEM;
1731                 zcrypt_device_status_mask(device_status);
1732                 if (copy_to_user((char __user *)arg, device_status,
1733                                  total_size))
1734                         rc = -EFAULT;
1735                 kfree(device_status);
1736                 return rc;
1737         }
1738         case Z90STAT_STATUS_MASK: {
1739                 /* the old ioctl supports only 64 adapters */
1740                 char status[MAX_ZDEV_CARDIDS];
1741
1742                 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
1743                 if (copy_to_user((char __user *)arg, status, sizeof(status)))
1744                         return -EFAULT;
1745                 return 0;
1746         }
1747         case Z90STAT_QDEPTH_MASK: {
1748                 /* the old ioctl supports only 64 adapters */
1749                 char qdepth[MAX_ZDEV_CARDIDS];
1750
1751                 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
1752                 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
1753                         return -EFAULT;
1754                 return 0;
1755         }
1756         case Z90STAT_PERDEV_REQCNT: {
1757                 /* the old ioctl supports only 64 adapters */
1758                 u32 reqcnt[MAX_ZDEV_CARDIDS];
1759
1760                 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
1761                 if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
1762                         return -EFAULT;
1763                 return 0;
1764         }
1765         /* unknown ioctl number */
1766         default:
1767                 ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd);
1768                 return -ENOIOCTLCMD;
1769         }
1770 }
1771
1772 #ifdef CONFIG_COMPAT
1773 /*
1774  * ioctl32 conversion routines (32-bit compat structs -> 64-bit structs)
1775  */
1776 struct compat_ica_rsa_modexpo {
1777         compat_uptr_t   inputdata;
1778         unsigned int    inputdatalength;
1779         compat_uptr_t   outputdata;
1780         unsigned int    outputdatalength;
1781         compat_uptr_t   b_key;
1782         compat_uptr_t   n_modulus;
1783 };
1784
1785 static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
1786                             unsigned int cmd, unsigned long arg)
1787 {
1788         struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
1789         struct compat_ica_rsa_modexpo mex32;
1790         struct ica_rsa_modexpo mex64;
1791         struct zcrypt_track tr;
1792         long rc;
1793
1794         memset(&tr, 0, sizeof(tr));
1795         if (copy_from_user(&mex32, umex32, sizeof(mex32)))
1796                 return -EFAULT;
1797         mex64.inputdata = compat_ptr(mex32.inputdata);
1798         mex64.inputdatalength = mex32.inputdatalength;
1799         mex64.outputdata = compat_ptr(mex32.outputdata);
1800         mex64.outputdatalength = mex32.outputdatalength;
1801         mex64.b_key = compat_ptr(mex32.b_key);
1802         mex64.n_modulus = compat_ptr(mex32.n_modulus);
1803         do {
1804                 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1805                 if (rc == -EAGAIN)
1806                         tr.again_counter++;
1807         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1808         /* on failure: retry once again after a requested rescan */
1809         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1810                 do {
1811                         rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1812                         if (rc == -EAGAIN)
1813                                 tr.again_counter++;
1814                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1815         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1816                 rc = -EIO;
1817         if (rc)
1818                 return rc;
1819         return put_user(mex64.outputdatalength,
1820                         &umex32->outputdatalength);
1821 }
1822
1823 struct compat_ica_rsa_modexpo_crt {
1824         compat_uptr_t   inputdata;
1825         unsigned int    inputdatalength;
1826         compat_uptr_t   outputdata;
1827         unsigned int    outputdatalength;
1828         compat_uptr_t   bp_key;
1829         compat_uptr_t   bq_key;
1830         compat_uptr_t   np_prime;
1831         compat_uptr_t   nq_prime;
1832         compat_uptr_t   u_mult_inv;
1833 };
1834
1835 static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
1836                                 unsigned int cmd, unsigned long arg)
1837 {
1838         struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
1839         struct compat_ica_rsa_modexpo_crt crt32;
1840         struct ica_rsa_modexpo_crt crt64;
1841         struct zcrypt_track tr;
1842         long rc;
1843
1844         memset(&tr, 0, sizeof(tr));
1845         if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
1846                 return -EFAULT;
1847         crt64.inputdata = compat_ptr(crt32.inputdata);
1848         crt64.inputdatalength = crt32.inputdatalength;
1849         crt64.outputdata = compat_ptr(crt32.outputdata);
1850         crt64.outputdatalength = crt32.outputdatalength;
1851         crt64.bp_key = compat_ptr(crt32.bp_key);
1852         crt64.bq_key = compat_ptr(crt32.bq_key);
1853         crt64.np_prime = compat_ptr(crt32.np_prime);
1854         crt64.nq_prime = compat_ptr(crt32.nq_prime);
1855         crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
1856         do {
1857                 rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1858                 if (rc == -EAGAIN)
1859                         tr.again_counter++;
1860         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1861         /* on failure: retry once again after a requested rescan */
1862         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1863                 do {
1864                         rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1865                         if (rc == -EAGAIN)
1866                                 tr.again_counter++;
1867                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1868         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1869                 rc = -EIO;
1870         if (rc)
1871                 return rc;
1872         return put_user(crt64.outputdatalength,
1873                         &ucrt32->outputdatalength);
1874 }
1875
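/*
 * 32-bit (compat) view of struct ica_xcRB: each user pointer sits in a
 * 16-byte slot together with its padding (hence 16 - sizeof(compat_uptr_t)),
 * so the packed layout matches what 31/32-bit userspace hands in.
 */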
1876 struct compat_ica_xcrb {
1877         unsigned short  agent_ID;
1878         unsigned int    user_defined;
1879         unsigned short  request_ID;
1880         unsigned int    request_control_blk_length;
1881         unsigned char   padding1[16 - sizeof(compat_uptr_t)];
1882         compat_uptr_t   request_control_blk_addr;
1883         unsigned int    request_data_length;
1884         char            padding2[16 - sizeof(compat_uptr_t)];
1885         compat_uptr_t   request_data_address;
1886         unsigned int    reply_control_blk_length;
1887         char            padding3[16 - sizeof(compat_uptr_t)];
1888         compat_uptr_t   reply_control_blk_addr;
1889         unsigned int    reply_data_length;
1890         char            padding4[16 - sizeof(compat_uptr_t)];
1891         compat_uptr_t   reply_data_addr;
1892         unsigned short  priority_window;
1893         unsigned int    status;
1894 } __packed;
1895
1896 static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
1897                          unsigned int cmd, unsigned long arg)
1898 {
1899         struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
1900         struct compat_ica_xcrb xcrb32;
1901         struct zcrypt_track tr;
1902         struct ica_xcRB xcrb64;
1903         long rc;
1904
1905         memset(&tr, 0, sizeof(tr));
1906         if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
1907                 return -EFAULT;
1908         xcrb64.agent_ID = xcrb32.agent_ID;
1909         xcrb64.user_defined = xcrb32.user_defined;
1910         xcrb64.request_ID = xcrb32.request_ID;
1911         xcrb64.request_control_blk_length =
1912                 xcrb32.request_control_blk_length;
1913         xcrb64.request_control_blk_addr =
1914                 compat_ptr(xcrb32.request_control_blk_addr);
1915         xcrb64.request_data_length =
1916                 xcrb32.request_data_length;
1917         xcrb64.request_data_address =
1918                 compat_ptr(xcrb32.request_data_address);
1919         xcrb64.reply_control_blk_length =
1920                 xcrb32.reply_control_blk_length;
1921         xcrb64.reply_control_blk_addr =
1922                 compat_ptr(xcrb32.reply_control_blk_addr);
1923         xcrb64.reply_data_length = xcrb32.reply_data_length;
1924         xcrb64.reply_data_addr =
1925                 compat_ptr(xcrb32.reply_data_addr);
1926         xcrb64.priority_window = xcrb32.priority_window;
1927         xcrb64.status = xcrb32.status;
1928         do {
1929                 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
1930                 if (rc == -EAGAIN)
1931                         tr.again_counter++;
1932         } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1933         /* on failure: retry once again after a requested rescan */
1934         if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1935                 do {
1936                         rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
1937                         if (rc == -EAGAIN)
1938                                 tr.again_counter++;
1939                 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1940         if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1941                 rc = -EIO;
1942         xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
1943         xcrb32.reply_data_length = xcrb64.reply_data_length;
1944         xcrb32.status = xcrb64.status;
1945         if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
1946                 return -EFAULT;
1947         return rc;
1948 }
1949
1950 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
1951                                 unsigned long arg)
1952 {
1953         int rc;
1954         struct ap_perms *perms =
1955                 (struct ap_perms *)filp->private_data;
1956
1957         rc = zcrypt_check_ioctl(perms, cmd);
1958         if (rc)
1959                 return rc;
1960
1961         if (cmd == ICARSAMODEXPO)
1962                 return trans_modexpo32(perms, filp, cmd, arg);
1963         if (cmd == ICARSACRT)
1964                 return trans_modexpo_crt32(perms, filp, cmd, arg);
1965         if (cmd == ZSECSENDCPRB)
1966                 return trans_xcrb32(perms, filp, cmd, arg);
1967         return zcrypt_unlocked_ioctl(filp, cmd, arg);
1968 }
1969 #endif
1970
1971 /*
1972  * Misc device file operations.
1973  */
1974 static const struct file_operations zcrypt_fops = {
1975         .owner          = THIS_MODULE,
1976         .read           = zcrypt_read,
1977         .write          = zcrypt_write,
1978         .unlocked_ioctl = zcrypt_unlocked_ioctl,
1979 #ifdef CONFIG_COMPAT
1980         .compat_ioctl   = zcrypt_compat_ioctl,
1981 #endif
1982         .open           = zcrypt_open,
1983         .release        = zcrypt_release,
1984         .llseek         = no_llseek,
1985 };
1986
1987 /*
1988  * Misc device.
1989  */
1990 static struct miscdevice zcrypt_misc_device = {
1991         .minor      = MISC_DYNAMIC_MINOR,
1992         .name       = "z90crypt",
1993         .fops       = &zcrypt_fops,
1994 };
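/*
 * Illustrative userspace sketch (not part of this driver): with the misc
 * device defined above as "z90crypt", applications are expected to reach
 * this API through /dev/z90crypt using the ioctl numbers from the zcrypt
 * uapi header. A minimal status query might look like:
 *
 *      int fd = open("/dev/z90crypt", O_RDWR);
 *      int pending = 0;
 *
 *      if (fd >= 0 && ioctl(fd, Z90STAT_PENDINGQ_COUNT, &pending) == 0)
 *              printf("pending requests: %d\n", pending);
 *      close(fd);
 *
 * (Device node name and permissions depend on udev configuration.)
 */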
1995
1996 static int zcrypt_rng_device_count;
1997 static u32 *zcrypt_rng_buffer;
1998 static int zcrypt_rng_buffer_index;
1999 static DEFINE_MUTEX(zcrypt_rng_mutex);
2000
2001 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
2002 {
2003         int rc;
2004
2005         /*
2006          * We don't need locking here because the RNG API guarantees serialized
2007          * read method calls.
2008          */
2009         if (zcrypt_rng_buffer_index == 0) {
2010                 rc = zcrypt_rng((char *)zcrypt_rng_buffer);
2011                 /* on failure: retry once again after a requested rescan */
2012                 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
2013                         rc = zcrypt_rng((char *)zcrypt_rng_buffer);
2014                 if (rc < 0)
2015                         return -EIO;
2016                 zcrypt_rng_buffer_index = rc / sizeof(*data);
2017         }
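        /* hand out one 32-bit word, consuming the buffer from the end */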
2018         *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
2019         return sizeof(*data);
2020 }
2021
2022 static struct hwrng zcrypt_rng_dev = {
2023         .name           = "zcrypt",
2024         .data_read      = zcrypt_rng_data_read,
2025         .quality        = 990,
2026 };
2027
2028 int zcrypt_rng_device_add(void)
2029 {
2030         int rc = 0;
2031
2032         mutex_lock(&zcrypt_rng_mutex);
2033         if (zcrypt_rng_device_count == 0) {
2034                 zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
2035                 if (!zcrypt_rng_buffer) {
2036                         rc = -ENOMEM;
2037                         goto out;
2038                 }
2039                 zcrypt_rng_buffer_index = 0;
2040                 rc = hwrng_register(&zcrypt_rng_dev);
2041                 if (rc)
2042                         goto out_free;
2043                 zcrypt_rng_device_count = 1;
2044         } else {
2045                 zcrypt_rng_device_count++;
2046         }
2047         mutex_unlock(&zcrypt_rng_mutex);
2048         return 0;
2049
2050 out_free:
2051         free_page((unsigned long)zcrypt_rng_buffer);
2052 out:
2053         mutex_unlock(&zcrypt_rng_mutex);
2054         return rc;
2055 }
2056
2057 void zcrypt_rng_device_remove(void)
2058 {
2059         mutex_lock(&zcrypt_rng_mutex);
2060         zcrypt_rng_device_count--;
2061         if (zcrypt_rng_device_count == 0) {
2062                 hwrng_unregister(&zcrypt_rng_dev);
2063                 free_page((unsigned long)zcrypt_rng_buffer);
2064         }
2065         mutex_unlock(&zcrypt_rng_mutex);
2066 }
2067
2068 /*
2069  * Wait until the zcrypt api is operational.
2070  * The AP bus scan and the binding of ap devices to device drivers are
2071  * asynchronous jobs. This function waits until these initial jobs are
2072  * done and thus the zcrypt api should be ready to serve crypto
2073  * requests - if there are resources available. The function uses an
2074  * internal timeout of 60s. The very first caller either waits until
2075  * the ap bus bindings are complete or until the timeout hits. This
2076  * outcome is remembered for further callers, which are only blocked
2077  * until that decision is made (timeout or bindings complete).
2078  * On timeout -ETIME is returned, on success the return value is 0.
2079  */
2080 int zcrypt_wait_api_operational(void)
2081 {
2082         static DEFINE_MUTEX(zcrypt_wait_api_lock);
2083         static int zcrypt_wait_api_state;
2084         int rc;
2085
2086         rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
2087         if (rc)
2088                 return rc;
2089
2090         switch (zcrypt_wait_api_state) {
2091         case 0:
2092                 /* initial state, wait for the ap bus bindings to complete */
2093                 rc = ap_wait_init_apqn_bindings_complete(
2094                         msecs_to_jiffies(60 * 1000));
2095                 switch (rc) {
2096                 case 0:
2097                         /* ap bus bindings are complete */
2098                         zcrypt_wait_api_state = 1;
2099                         break;
2100                 case -EINTR:
2101                         /* interrupted, go back to caller */
2102                         break;
2103                 case -ETIME:
2104                         /* timeout */
2105                         ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
2106                                         __func__);
2107                         zcrypt_wait_api_state = -ETIME;
2108                         break;
2109                 default:
2110                         /* other failure */
2111                         ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n",
2112                                        __func__, rc);
2113                         break;
2114                 }
2115                 break;
2116         case 1:
2117                 /* a previous caller already found ap bus bindings complete */
2118                 rc = 0;
2119                 break;
2120         default:
2121                 /* a previous caller had timeout or other failure */
2122                 rc = zcrypt_wait_api_state;
2123                 break;
2124         }
2125
2126         mutex_unlock(&zcrypt_wait_api_lock);
2127
2128         return rc;
2129 }
2130 EXPORT_SYMBOL(zcrypt_wait_api_operational);
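/*
 * Usage sketch (illustrative assumption, not a specific in-tree caller):
 * code that depends on the zcrypt api being ready could gate its own
 * setup on this function, e.g.
 *
 *      rc = zcrypt_wait_api_operational();
 *      if (rc)
 *              return rc;
 *
 * where rc is 0 on success, -ETIME on timeout and -EINTR when the wait
 * was interrupted; only then start submitting crypto requests.
 */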
2131
2132 int __init zcrypt_debug_init(void)
2133 {
2134         zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
2135                                          DBF_MAX_SPRINTF_ARGS * sizeof(long));
2136         debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
2137         debug_set_level(zcrypt_dbf_info, DBF_ERR);
2138
2139         return 0;
2140 }
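/*
 * Note: the debug entries registered above belong to the s390 debug
 * feature and are expected to appear in debugfs, typically under
 * /sys/kernel/debug/s390dbf/zcrypt/ (exact path depends on where
 * debugfs is mounted).
 */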
2141
2142 void zcrypt_debug_exit(void)
2143 {
2144         debug_unregister(zcrypt_dbf_info);
2145 }
2146
2147 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2148
2149 static int __init zcdn_init(void)
2150 {
2151         int rc;
2152
2153         /* create a new class 'zcrypt' */
2154         zcrypt_class = class_create(ZCRYPT_NAME);
2155         if (IS_ERR(zcrypt_class)) {
2156                 rc = PTR_ERR(zcrypt_class);
2157                 goto out_class_create_failed;
2158         }
2159         zcrypt_class->dev_release = zcdn_device_release;
2160
2161         /* alloc device minor range */
2162         rc = alloc_chrdev_region(&zcrypt_devt,
2163                                  0, ZCRYPT_MAX_MINOR_NODES,
2164                                  ZCRYPT_NAME);
2165         if (rc)
2166                 goto out_alloc_chrdev_failed;
2167
2168         cdev_init(&zcrypt_cdev, &zcrypt_fops);
2169         zcrypt_cdev.owner = THIS_MODULE;
2170         rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2171         if (rc)
2172                 goto out_cdev_add_failed;
2173
2174         /* create the class specific sysfs attributes */
2175         rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
2176         if (rc)
2177                 goto out_class_create_file_1_failed;
2178         rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
2179         if (rc)
2180                 goto out_class_create_file_2_failed;
2181
2182         return 0;
2183
2184 out_class_create_file_2_failed:
2185         class_remove_file(zcrypt_class, &class_attr_zcdn_create);
2186 out_class_create_file_1_failed:
2187         cdev_del(&zcrypt_cdev);
2188 out_cdev_add_failed:
2189         unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2190 out_alloc_chrdev_failed:
2191         class_destroy(zcrypt_class);
2192 out_class_create_failed:
2193         return rc;
2194 }
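/*
 * Admin usage sketch for the class attributes registered above (assumed
 * interface): writing a node name to the zcdn_create attribute, e.g.
 *
 *      echo "my_zcrypt_node" > /sys/class/zcrypt/zcdn_create
 *
 * is expected to create an additional zcrypt device node (surfacing as
 * /dev/my_zcrypt_node via udev); zcdn_destroy removes it again.
 */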
2195
2196 static void zcdn_exit(void)
2197 {
2198         class_remove_file(zcrypt_class, &class_attr_zcdn_create);
2199         class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
2200         zcdn_destroy_all();
2201         cdev_del(&zcrypt_cdev);
2202         unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2203         class_destroy(zcrypt_class);
2204 }
2205
2206 #endif
2207
2208 /*
2209  * zcrypt_api_init(): Module initialization.
2210  *
2211  * The module initialization code.
2212  */
2213 int __init zcrypt_api_init(void)
2214 {
2215         int rc;
2216
2217         rc = zcrypt_debug_init();
2218         if (rc)
2219                 goto out;
2220
2221 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2222         rc = zcdn_init();
2223         if (rc)
2224                 goto out;
2225 #endif
2226
2227         /* Register the zcrypt misc device (the "request sprayer"). */
2228         rc = misc_register(&zcrypt_misc_device);
2229         if (rc < 0)
2230                 goto out_misc_register_failed;
2231
2232         zcrypt_msgtype6_init();
2233         zcrypt_msgtype50_init();
2234
2235         return 0;
2236
2237 out_misc_register_failed:
2238 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2239         zcdn_exit();
2240 #endif
2241         zcrypt_debug_exit();
2242 out:
2243         return rc;
2244 }
2245
2246 /*
2247  * zcrypt_api_exit(): Module termination.
2248  *
2249  * The module termination code.
2250  */
2251 void __exit zcrypt_api_exit(void)
2252 {
2253 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2254         zcdn_exit();
2255 #endif
2256         misc_deregister(&zcrypt_misc_device);
2257         zcrypt_msgtype6_exit();
2258         zcrypt_msgtype50_exit();
2259         zcrypt_ccamisc_exit();
2260         zcrypt_ep11misc_exit();
2261         zcrypt_debug_exit();
2262 }
2263
2264 module_init(zcrypt_api_init);
2265 module_exit(zcrypt_api_exit);