// SPDX-License-Identifier: GPL-2.0+
/*
 *  Copyright IBM Corp. 2001, 2018
 *  Author(s): Robert Burroughs
 */
#define KMSG_COMPONENT "zcrypt"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/atomic.h>
#include <linux/uaccess.h>
#include <linux/hw_random.h>
#include <linux/debugfs.h>
#include <linux/cdev.h>
#include <linux/ctype.h>
#include <linux/capability.h>
#include <asm/debug.h>
#define CREATE_TRACE_POINTS
#include <asm/trace/zcrypt.h>

#include "zcrypt_api.h"
#include "zcrypt_debug.h"

#include "zcrypt_msgtype6.h"
#include "zcrypt_msgtype50.h"
#include "zcrypt_ccamisc.h"
#include "zcrypt_ep11misc.h"
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
		   "Copyright IBM Corp. 2001, 2012");
MODULE_LICENSE("GPL");
/*
 * zcrypt tracepoint functions
 */
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);

static atomic_t zcrypt_open_count = ATOMIC_INIT(0);

static LIST_HEAD(zcrypt_ops_list);

/* Zcrypt related debug feature stuff. */
debug_info_t *zcrypt_dbf_info;
/*
 * Process a rescan of the transport layer.
 * Runs a synchronous AP bus rescan.
 * Returns true if something has changed (for example the
 * bus scan has found and built up new devices) and it is
 * worth retrying. Otherwise false is returned meaning
 * no changes on the AP bus level.
 */
static inline bool zcrypt_process_rescan(void)
{
	return ap_bus_force_rescan();
}
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
{
	list_add_tail(&zops->list, &zcrypt_ops_list);
}

void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
{
	list_del_init(&zops->list);
}
struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
{
	struct zcrypt_ops *zops;

	list_for_each_entry(zops, &zcrypt_ops_list, list)
		if (zops->variant == variant &&
		    (!strncmp(zops->name, name, sizeof(zops->name))))
			return zops;
	return NULL;
}
EXPORT_SYMBOL(zcrypt_msgtype);
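
/*
 * Illustrative sketch only (the names below are hypothetical, not part
 * of this file): a message type module fills a zcrypt_ops with its name
 * and variant, registers it at module init, and users resolve the ops
 * by the same name/variant pair:
 *
 *	static struct zcrypt_ops example_ops = {
 *		.name	 = "EXAMPLE",
 *		.variant = 0,
 *	};
 *
 *	zcrypt_msgtype_register(&example_ops);
 *	zops = zcrypt_msgtype("EXAMPLE", 0);	// returns &example_ops
 *	zcrypt_msgtype_unregister(&example_ops);
 */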
/*
 * Multi device nodes extension functions.
 */

struct zcdn_device;
static void zcdn_device_release(struct device *dev);
static const struct class zcrypt_class = {
	.name = ZCRYPT_NAME,
	.dev_release = zcdn_device_release,
};
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;

struct zcdn_device {
	struct device device;
	struct ap_perms perms;
};

#define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)

#define ZCDN_MAX_NAME 32
static int zcdn_create(const char *name);
static int zcdn_destroy(const char *name);
/*
 * Find zcdn device by name.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
	struct device *dev = class_find_device_by_name(&zcrypt_class, name);

	return dev ? to_zcdn_dev(dev) : NULL;
}
/*
 * Find zcdn device by devt value.
 * Returns reference to the zcdn device which needs to be released
 * with put_device() after use.
 */
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
	struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);

	return dev ? to_zcdn_dev(dev) : NULL;
}
static ssize_t ioctlmask_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}
static ssize_t ioctlmask_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
			       AP_IOCTLS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(ioctlmask);
static ssize_t apmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}
static ssize_t apmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
			       AP_DEVICES, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(apmask);
static ssize_t aqmask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}
static ssize_t aqmask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(aqmask);
static ssize_t admask_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);
	int i, n;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	n = sysfs_emit(buf, "0x");
	for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
		n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
	n += sysfs_emit_at(buf, n, "\n");

	mutex_unlock(&ap_perms_mutex);

	return n;
}
static ssize_t admask_store(struct device *dev,
			    struct device_attribute *attr,
			    const char *buf, size_t count)
{
	int rc;
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
			       AP_DOMAINS, &ap_perms_mutex);
	if (rc)
		return rc;

	return count;
}

static DEVICE_ATTR_RW(admask);
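
/*
 * Usage sketch for the mask attributes above (assumes a zcdn node
 * created with the hypothetical name "my_zcdn"; mask strings are parsed
 * by ap_parse_mask_str(), which accepts absolute hex masks as well as
 * relative '+'/'-' bit specs):
 *
 *	echo "+0" > /sys/class/zcrypt/my_zcdn/apmask	# admit card 0
 *	echo "+6" > /sys/class/zcrypt/my_zcdn/aqmask	# admit domain 6
 *	cat /sys/class/zcrypt/my_zcdn/ioctlmask		# show ioctl mask
 */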
static struct attribute *zcdn_dev_attrs[] = {
	&dev_attr_ioctlmask.attr,
	&dev_attr_apmask.attr,
	&dev_attr_aqmask.attr,
	&dev_attr_admask.attr,
	NULL
};

static struct attribute_group zcdn_dev_attr_group = {
	.attrs = zcdn_dev_attrs
};

static const struct attribute_group *zcdn_dev_attr_groups[] = {
	&zcdn_dev_attr_group,
	NULL
};
static ssize_t zcdn_create_store(const struct class *class,
				 const struct class_attribute *attr,
				 const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_create(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_create =
	__ATTR(create, 0600, NULL, zcdn_create_store);
static ssize_t zcdn_destroy_store(const struct class *class,
				  const struct class_attribute *attr,
				  const char *buf, size_t count)
{
	int rc;
	char name[ZCDN_MAX_NAME];

	strscpy(name, skip_spaces(buf), sizeof(name));

	rc = zcdn_destroy(strim(name));

	return rc ? rc : count;
}

static const struct class_attribute class_attr_zcdn_destroy =
	__ATTR(destroy, 0600, NULL, zcdn_destroy_store);
static void zcdn_device_release(struct device *dev)
{
	struct zcdn_device *zcdndev = to_zcdn_dev(dev);

	ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
			__func__, MAJOR(dev->devt), MINOR(dev->devt));

	kfree(zcdndev);
}
static int zcdn_create(const char *name)
{
	dev_t devt;
	int i, rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* check if device node with this name already exists */
	if (name[0]) {
		zcdndev = find_zcdndev_by_name(name);
		if (zcdndev) {
			put_device(&zcdndev->device);
			rc = -EEXIST;
			goto unlockout;
		}
	}

	/* find an unused minor number */
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev)
			put_device(&zcdndev->device);
		else
			break;
	}
	if (i == ZCRYPT_MAX_MINOR_NODES) {
		rc = -ENOSPC;
		goto unlockout;
	}

	/* alloc and prepare a new zcdn device */
	zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
	if (!zcdndev) {
		rc = -ENOMEM;
		goto unlockout;
	}
	zcdndev->device.release = zcdn_device_release;
	zcdndev->device.class = &zcrypt_class;
	zcdndev->device.devt = devt;
	zcdndev->device.groups = zcdn_dev_attr_groups;
	if (name[0])
		rc = dev_set_name(&zcdndev->device, "%s", name);
	else
		rc = dev_set_name(&zcdndev->device, ZCRYPT_NAME "_%d", (int)MINOR(devt));
	if (rc) {
		kfree(zcdndev);
		goto unlockout;
	}
	rc = device_register(&zcdndev->device);
	if (rc) {
		put_device(&zcdndev->device);
		goto unlockout;
	}

	ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
			__func__, MAJOR(devt), MINOR(devt));

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}
static int zcdn_destroy(const char *name)
{
	int rc = 0;
	struct zcdn_device *zcdndev;

	if (mutex_lock_interruptible(&ap_perms_mutex))
		return -ERESTARTSYS;

	/* try to find this zcdn device */
	zcdndev = find_zcdndev_by_name(name);
	if (!zcdndev) {
		rc = -ENOENT;
		goto unlockout;
	}

	/*
	 * The zcdn device is not hard destroyed. It is subject to
	 * reference counting and thus just needs to be unregistered.
	 */
	put_device(&zcdndev->device);
	device_unregister(&zcdndev->device);

unlockout:
	mutex_unlock(&ap_perms_mutex);
	return rc;
}
static void zcdn_destroy_all(void)
{
	int i;
	dev_t devt;
	struct zcdn_device *zcdndev;

	mutex_lock(&ap_perms_mutex);
	for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
		devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
		zcdndev = find_zcdndev_by_devt(devt);
		if (zcdndev) {
			put_device(&zcdndev->device);
			device_unregister(&zcdndev->device);
		}
	}
	mutex_unlock(&ap_perms_mutex);
}
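
/*
 * Admin usage sketch for the create/destroy class attributes defined
 * above (the node name "my_zcdn" is an example):
 *
 *	echo "my_zcdn" > /sys/class/zcrypt/create	# new /dev/my_zcdn
 *	echo "my_zcdn" > /sys/class/zcrypt/destroy
 */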
/*
 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
 *
 * This function is not supported beyond zcrypt 1.3.1.
 */
static ssize_t zcrypt_read(struct file *filp, char __user *buf,
			   size_t count, loff_t *f_pos)
{
	return -EPERM;
}
/*
 * zcrypt_write(): Not allowed.
 *
 * Write is not allowed.
 */
static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *f_pos)
{
	return -EPERM;
}
/*
 * zcrypt_open(): Count number of users.
 *
 * Device open function to count number of users.
 */
static int zcrypt_open(struct inode *inode, struct file *filp)
{
	struct ap_perms *perms = &ap_perms;

	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		if (mutex_lock_interruptible(&ap_perms_mutex))
			return -ERESTARTSYS;
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		/* find returns a reference, no get_device() needed */
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev)
			perms = &zcdndev->perms;
	}
	filp->private_data = (void *)perms;

	atomic_inc(&zcrypt_open_count);
	return stream_open(inode, filp);
}
/*
 * zcrypt_release(): Count number of users.
 *
 * Device close function to count number of users.
 */
static int zcrypt_release(struct inode *inode, struct file *filp)
{
	if (filp->f_inode->i_cdev == &zcrypt_cdev) {
		struct zcdn_device *zcdndev;

		mutex_lock(&ap_perms_mutex);
		zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
		mutex_unlock(&ap_perms_mutex);
		if (zcdndev) {
			/* 2 puts here: one for find, one for open */
			put_device(&zcdndev->device);
			put_device(&zcdndev->device);
		}
	}

	atomic_dec(&zcrypt_open_count);
	return 0;
}
static inline int zcrypt_check_ioctl(struct ap_perms *perms,
				     unsigned int cmd)
{
	int rc = -EPERM;
	int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;

	if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
		if (test_bit_inv(ioctlnr, perms->ioctlm))
			rc = 0;
	}

	if (rc)
		ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
				__func__, ioctlnr, rc);

	return rc;
}
static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
{
	return test_bit_inv(card, perms->apm) ? true : false;
}

static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
{
	return test_bit_inv(queue, perms->aqm) ? true : false;
}
static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
						     struct zcrypt_queue *zq,
						     struct module **pmod,
						     unsigned int weight)
{
	if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
		return NULL;
	zcrypt_queue_get(zq);
	get_device(&zq->queue->ap_dev.device);
	atomic_add(weight, &zc->load);
	atomic_add(weight, &zq->load);
	zq->request_count++;
	*pmod = zq->queue->ap_dev.device.driver->owner;
	return zq;
}
static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
				     struct zcrypt_queue *zq,
				     struct module *mod,
				     unsigned int weight)
{
	zq->request_count--;
	atomic_sub(weight, &zc->load);
	atomic_sub(weight, &zq->load);
	put_device(&zq->queue->ap_dev.device);
	zcrypt_queue_put(zq);
	module_put(mod);
}
static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
				       struct zcrypt_card *pref_zc,
				       unsigned int weight,
				       unsigned int pref_weight)
{
	if (!pref_zc)
		return true;
	weight += atomic_read(&zc->load);
	pref_weight += atomic_read(&pref_zc->load);
	if (weight == pref_weight)
		return atomic64_read(&zc->card->total_request_count) <
		       atomic64_read(&pref_zc->card->total_request_count);
	return weight < pref_weight;
}
static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
					struct zcrypt_queue *pref_zq,
					unsigned int weight,
					unsigned int pref_weight)
{
	if (!pref_zq)
		return true;
	weight += atomic_read(&zq->load);
	pref_weight += atomic_read(&pref_zq->load);
	if (weight == pref_weight)
		return zq->queue->total_request_count <
		       pref_zq->queue->total_request_count;
	return weight < pref_weight;
}
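
/*
 * Worked example for the two compare helpers (numbers invented): two
 * cards with speed rating wgt=10 and current loads 30 and 20 compare as
 * 10+30=40 versus 10+20=30, so the less loaded card wins; on a weight
 * tie the lower total_request_count decides. For a retried request the
 * TRACK_AGAIN_*_WEIGHT_PENALTY values are added on top of the weight of
 * the card/queue used last time, steering the retry elsewhere.
 */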
/*
 * zcrypt ioctls.
 */
static long zcrypt_rsa_modexpo(struct ap_perms *perms,
			       struct zcrypt_track *tr,
			       struct ica_rsa_modexpo *mex)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);

	ap_init_message(&ap_msg);

	if (mex->outputdatalength < mex->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	mex->outputdatalength = mex->inputdatalength;

	rc = get_rsa_modex_fc(mex, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > mex->inputdatalength ||
		    zc->max_mod_size < mex->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(mex, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static long zcrypt_rsa_crt(struct ap_perms *perms,
			   struct zcrypt_track *tr,
			   struct ica_rsa_modexpo_crt *crt)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(crt, TP_ICARSACRT);

	ap_init_message(&ap_msg);

	if (crt->outputdatalength < crt->inputdatalength) {
		func_code = 0;
		rc = -EINVAL;
		goto out;
	}

	/*
	 * As long as outputdatalength is big enough, we can set the
	 * outputdatalength equal to the inputdatalength, since that is the
	 * number of bytes we will copy in any case
	 */
	crt->outputdatalength = crt->inputdatalength;

	rc = get_rsa_crt_fc(crt, &func_code);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable accelerator or CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !(zc->card->hwinfo.accel || zc->card->hwinfo.cca))
			continue;
		/* Check for size limits */
		if (zc->min_mod_size > crt->inputdatalength ||
		    zc->max_mod_size < crt->inputdatalength)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rsa_modexpo_crt ||
			    !ap_queue_usable(zq->queue))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(crt, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
			      struct zcrypt_track *tr,
			      struct ica_xcRB *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ap_message ap_msg;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	unsigned short *domain, tdom;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);

	xcrb->status = 0;
	ap_init_message(&ap_msg);

	rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out;
	print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	tdom = *domain;
	if (perms != &ap_perms && tdom < AP_DOMAINS) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(tdom, perms->adm)) {
				rc = -ENODEV;
				goto out;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out;
		}
	}
	/*
	 * If a valid target domain is set and this domain is NOT a usage
	 * domain but a control only domain, autoselect target domain.
	 */
	if (tdom < AP_DOMAINS &&
	    !ap_test_config_usage_domain(tdom) &&
	    ap_test_config_ctrl_domain(tdom))
		tdom = AUTOSEL_DOM;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* Check for user selected CCA card */
		if (xcrb->user_defined != AUTOSELECT &&
		    xcrb->user_defined != zc->card->id)
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check for device usable and eligible */
			if (!zq->online || !zq->ops->send_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (tdom != AUTOSEL_DOM &&
			     tdom != AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no match for address %02x.%04x => ENODEV\n",
			 xcrb->user_defined, *domain);
		rc = -ENODEV;
		goto out;
	}

	/* in case of auto select, provide the correct domain */
	qid = pref_zq->queue->qid;
	if (*domain == AUTOSEL_DOM)
		*domain = AP_QID_QUEUE(qid);

	rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
long zcrypt_send_cprb(struct ica_xcRB *xcrb)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
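
/*
 * In-kernel usage sketch (the CPRB contents are entirely caller
 * supplied and elided here); the CCA misc helpers use this export to
 * run CCA commands from kernel context:
 *
 *	struct ica_xcRB xcrb;
 *
 *	// fill xcrb with request/reply buffers and a CCA CPRB ...
 *	rc = zcrypt_send_cprb(&xcrb);
 */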
static bool is_desired_ep11_card(unsigned int dev_id,
				 unsigned short target_num,
				 struct ep11_target_dev *targets)
{
	while (target_num-- > 0) {
		if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
			return true;
		targets++;
	}
	return false;
}
static bool is_desired_ep11_queue(unsigned int dev_qid,
				  unsigned short target_num,
				  struct ep11_target_dev *targets)
{
	int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);

	while (target_num-- > 0) {
		if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
		    (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
			return true;
		targets++;
	}
	return false;
}
static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
				   struct zcrypt_track *tr,
				   struct ep11_urb *xcrb)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	struct ep11_target_dev *targets;
	unsigned short target_num;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code, domain;
	struct ap_message ap_msg;
	int cpen, qpen, qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);

	ap_init_message(&ap_msg);

	target_num = (unsigned short)xcrb->targets_num;

	/* empty list indicates autoselect (all available targets) */
	targets = NULL;
	if (target_num != 0) {
		struct ep11_target_dev __user *uptr;

		targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
		if (!targets) {
			func_code = 0;
			rc = -ENOMEM;
			goto out;
		}

		uptr = (struct ep11_target_dev __force __user *)xcrb->targets;
		if (z_copy_from_user(userspace, targets, uptr,
				     target_num * sizeof(*targets))) {
			func_code = 0;
			rc = -EFAULT;
			goto out_free;
		}
	}

	rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
	if (rc)
		goto out_free;
	print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
			     ap_msg.msg, ap_msg.len, false);

	if (perms != &ap_perms && domain < AUTOSEL_DOM) {
		if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
			if (!test_bit_inv(domain, perms->adm)) {
				rc = -ENODEV;
				goto out_free;
			}
		} else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
			rc = -EOPNOTSUPP;
			goto out_free;
		}
	}

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable EP11 card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.ep11)
			continue;
		/* Check for user selected EP11 card */
		if (targets &&
		    !is_desired_ep11_card(zc->card->id, target_num, targets))
			continue;
		/* check if request size exceeds card max msg size */
		if (ap_msg.len > zc->card->maxmsgsize)
			continue;
		/* check if device node has admission for this card */
		if (!zcrypt_check_card(perms, zc->card->id))
			continue;
		/* get weight index of the card device */
		wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
		/* penalty if this msg was previously sent via this card */
		cpen = (tr && tr->again_counter && tr->last_qid &&
			AP_QID_CARD(tr->last_qid) == zc->card->id) ?
			TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
		if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->send_ep11_cprb ||
			    !ap_queue_usable(zq->queue) ||
			    (targets &&
			     !is_desired_ep11_queue(zq->queue->qid,
						    target_num, targets)))
				continue;
			/* check if device node has admission for this queue */
			if (!zcrypt_check_queue(perms,
						AP_QID_QUEUE(zq->queue->qid)))
				continue;
			/* penalty if the msg was previously sent at this qid */
			qpen = (tr && tr->again_counter && tr->last_qid &&
				tr->last_qid == zq->queue->qid) ?
				TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
			if (!zcrypt_queue_compare(zq, pref_zq,
						  wgt + cpen + qpen, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt + cpen + qpen;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		if (targets && target_num == 1) {
			pr_debug("no match for address %02x.%04x => ENODEV\n",
				 (int)targets->ap_id, (int)targets->dom_id);
		} else if (targets) {
			pr_debug("no match for %d target addrs => ENODEV\n",
				 (int)target_num);
		} else {
			pr_debug("no match for address ff.ffff => ENODEV\n");
		}
		rc = -ENODEV;
		goto out_free;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
	if (!rc) {
		print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     ap_msg.msg, ap_msg.len, false);
	}

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out_free:
	kfree(targets);
out:
	ap_release_message(&ap_msg);
	if (tr) {
		tr->last_rc = rc;
		tr->last_qid = qid;
	}
	trace_s390_zcrypt_rep(xcrb, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	do {
		rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("rc=%d\n", rc);

	return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
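
/*
 * Usage sketch: in-kernel EP11 callers build an ep11_urb whose target
 * list narrows the candidate APQNs; an empty list (targets_num == 0)
 * means autoselect. The APQN 02.0006 below is an invented example:
 *
 *	struct ep11_target_dev tgt = { .ap_id = 2, .dom_id = 6 };
 *	struct ep11_urb urb;
 *
 *	// fill urb request/reply buffers ...
 *	urb.targets_num = 1;
 *	urb.targets = (u64)(unsigned long)&tgt;
 *	rc = zcrypt_send_ep11_cprb(&urb);
 */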
static long zcrypt_rng(char *buffer)
{
	struct zcrypt_card *zc, *pref_zc;
	struct zcrypt_queue *zq, *pref_zq;
	unsigned int wgt = 0, pref_wgt = 0;
	unsigned int func_code;
	struct ap_message ap_msg;
	unsigned int domain;
	int qid = 0, rc = -ENODEV;
	struct module *mod;

	trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);

	ap_init_message(&ap_msg);
	rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
	if (rc)
		goto out;

	pref_zc = NULL;
	pref_zq = NULL;
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		/* Check for usable CCA card */
		if (!zc->online || !zc->card->config || zc->card->chkstop ||
		    !zc->card->hwinfo.cca)
			continue;
		/* get weight index of the card device */
		wgt = zc->speed_rating[func_code];
		if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
			continue;
		for_each_zcrypt_queue(zq, zc) {
			/* check if device is usable and eligible */
			if (!zq->online || !zq->ops->rng ||
			    !ap_queue_usable(zq->queue))
				continue;
			if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
				continue;
			pref_zc = zc;
			pref_zq = zq;
			pref_wgt = wgt;
		}
	}
	pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
	spin_unlock(&zcrypt_list_lock);

	if (!pref_zq) {
		pr_debug("no matching queue found => ENODEV\n");
		rc = -ENODEV;
		goto out;
	}

	qid = pref_zq->queue->qid;
	rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);

	spin_lock(&zcrypt_list_lock);
	zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
	spin_unlock(&zcrypt_list_lock);

out:
	ap_release_message(&ap_msg);
	trace_s390_zcrypt_rep(buffer, func_code, rc,
			      AP_QID_CARD(qid), AP_QID_QUEUE(qid));
	return rc;
}
static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status *stat;
	int card, queue;

	memset(devstatus, 0, MAX_ZDEV_ENTRIES
	       * sizeof(struct zcrypt_device_status));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (card >= MAX_ZDEV_CARDIDS)
				continue;
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	struct zcrypt_device_status_ext *stat;
	int card, queue;

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			queue = AP_QID_QUEUE(zq->queue->qid);
			stat = &devstatus[card * AP_DOMAINS + queue];
			stat->hwtype = zc->card->ap_dev.device_type;
			stat->functions = zc->card->hwinfo.fac >> 26;
			stat->qid = zq->queue->qid;
			stat->online = zq->online ? 0x01 : 0x00;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
int zcrypt_device_status_ext(int card, int queue,
			     struct zcrypt_device_status_ext *devstat)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;

	memset(devstat, 0, sizeof(*devstat));

	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (card == AP_QID_CARD(zq->queue->qid) &&
			    queue == AP_QID_QUEUE(zq->queue->qid)) {
				devstat->hwtype = zc->card->ap_dev.device_type;
				devstat->functions = zc->card->hwinfo.fac >> 26;
				devstat->qid = zq->queue->qid;
				devstat->online = zq->online ? 0x01 : 0x00;
				spin_unlock(&zcrypt_list_lock);
				return 0;
			}
		}
	}
	spin_unlock(&zcrypt_list_lock);

	return -ENODEV;
}
EXPORT_SYMBOL(zcrypt_device_status_ext);
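
/*
 * Reader sketch: the full status array is indexed card-major, so the
 * entry for APQN (card, domain) sits at
 * devstatus[card * AP_DOMAINS + domain]; e.g. with AP_DOMAINS == 256
 * the entry for APQN 02.0006 is devstatus[2 * 256 + 6].
 */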
static void zcrypt_status_mask(char status[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(status, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			status[card] = zc->online ? zc->user_space_type : 0x0d;
		}
	}
	spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;

	memset(qdepth, 0, max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			qdepth[card] =
				zq->queue->pendingq_count +
				zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}
static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int card;
	u64 cnt;

	memset(reqcnt, 0, sizeof(int) * max_adapters);
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			card = AP_QID_CARD(zq->queue->qid);
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
			    card >= max_adapters)
				continue;
			spin_lock(&zq->queue->lock);
			cnt = zq->queue->total_request_count;
			spin_unlock(&zq->queue->lock);
			reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
}
static int zcrypt_pendingq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int pendingq_count;

	pendingq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			pendingq_count += zq->queue->pendingq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return pendingq_count;
}
static int zcrypt_requestq_count(void)
{
	struct zcrypt_card *zc;
	struct zcrypt_queue *zq;
	int requestq_count;

	requestq_count = 0;
	spin_lock(&zcrypt_list_lock);
	local_bh_disable();
	for_each_zcrypt_card(zc) {
		for_each_zcrypt_queue(zq, zc) {
			if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
				continue;
			spin_lock(&zq->queue->lock);
			requestq_count += zq->queue->requestq_count;
			spin_unlock(&zq->queue->lock);
		}
	}
	local_bh_enable();
	spin_unlock(&zcrypt_list_lock);
	return requestq_count;
}
static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo mex;
	struct ica_rsa_modexpo __user *umex = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex, umex, sizeof(mex)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
		return rc;
	}
	return put_user(mex.outputdatalength, &umex->outputdatalength);
}
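
/*
 * Userspace usage sketch for ICARSAMODEXPO (buffers and their setup are
 * hypothetical; error handling omitted):
 *
 *	int fd = open("/dev/z90crypt", O_RDWR);
 *	struct ica_rsa_modexpo mex = {
 *		.inputdata	  = input,	// inputdatalength bytes
 *		.inputdatalength  = modlen,
 *		.outputdata	  = output,	// >= inputdatalength bytes
 *		.outputdatalength = modlen,
 *		.b_key		  = exponent,
 *		.n_modulus	  = modulus,
 *	};
 *
 *	if (ioctl(fd, ICARSAMODEXPO, &mex) == 0)
 *		;	// result is in output[0..mex.outputdatalength)
 */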
static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct zcrypt_track tr;
	struct ica_rsa_modexpo_crt crt;
	struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt, ucrt, sizeof(crt)))
		return -EFAULT;

	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc) {
		pr_debug("ioctl ICARSACRT rc=%d\n", rc);
		return rc;
	}
	return put_user(crt.outputdatalength, &ucrt->outputdatalength);
}
static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ica_xcRB xcrb;
	struct zcrypt_track tr;
	struct ica_xcRB __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
			 rc, xcrb.status);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}
static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
{
	int rc;
	struct ep11_urb xcrb;
	struct zcrypt_track tr;
	struct ep11_urb __user *uxcrb = (void __user *)arg;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
		return -EFAULT;

	do {
		rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
	if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
		return -EFAULT;
	return rc;
}
static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	switch (cmd) {
	case ICARSAMODEXPO:
		return icarsamodexpo_ioctl(perms, arg);
	case ICARSACRT:
		return icarsacrt_ioctl(perms, arg);
	case ZSECSENDCPRB:
		return zsecsendcprb_ioctl(perms, arg);
	case ZSENDEP11CPRB:
		return zsendep11cprb_ioctl(perms, arg);
	case ZCRYPT_DEVICE_STATUS: {
		struct zcrypt_device_status_ext *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES_EXT
			* sizeof(struct zcrypt_device_status_ext);

		device_status = kvcalloc(MAX_ZDEV_ENTRIES_EXT,
					 sizeof(struct zcrypt_device_status_ext),
					 GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask_ext(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kvfree(device_status);
		return rc;
	}
	case ZCRYPT_STATUS_MASK: {
		char status[AP_DEVICES];

		zcrypt_status_mask(status, AP_DEVICES);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_QDEPTH_MASK: {
		char qdepth[AP_DEVICES];

		zcrypt_qdepth_mask(qdepth, AP_DEVICES);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case ZCRYPT_PERDEV_REQCNT: {
		u32 *reqcnt;

		reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
		if (!reqcnt)
			return -ENOMEM;
		zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
		if (copy_to_user((int __user *)arg, reqcnt,
				 sizeof(u32) * AP_DEVICES))
			rc = -EFAULT;
		kfree(reqcnt);
		return rc;
	}
	case Z90STAT_REQUESTQ_COUNT:
		return put_user(zcrypt_requestq_count(), (int __user *)arg);
	case Z90STAT_PENDINGQ_COUNT:
		return put_user(zcrypt_pendingq_count(), (int __user *)arg);
	case Z90STAT_TOTALOPEN_COUNT:
		return put_user(atomic_read(&zcrypt_open_count),
				(int __user *)arg);
	case Z90STAT_DOMAIN_INDEX:
		return put_user(ap_domain_index, (int __user *)arg);
	/*
	 * Deprecated ioctls
	 */
	case ZDEVICESTATUS: {
		/* the old ioctl supports only 64 adapters */
		struct zcrypt_device_status *device_status;
		size_t total_size = MAX_ZDEV_ENTRIES
			* sizeof(struct zcrypt_device_status);

		device_status = kzalloc(total_size, GFP_KERNEL);
		if (!device_status)
			return -ENOMEM;
		zcrypt_device_status_mask(device_status);
		if (copy_to_user((char __user *)arg, device_status,
				 total_size))
			rc = -EFAULT;
		kfree(device_status);
		return rc;
	}
	case Z90STAT_STATUS_MASK: {
		/* the old ioctl supports only 64 adapters */
		char status[MAX_ZDEV_CARDIDS];

		zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, status, sizeof(status)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_QDEPTH_MASK: {
		/* the old ioctl supports only 64 adapters */
		char qdepth[MAX_ZDEV_CARDIDS];

		zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
		if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
			return -EFAULT;
		return 0;
	}
	case Z90STAT_PERDEV_REQCNT: {
		/* the old ioctl supports only 64 adapters */
		u32 reqcnt[MAX_ZDEV_CARDIDS];

		zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
		if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
			return -EFAULT;
		return 0;
	}
	/* unknown ioctl number */
	default:
		pr_debug("unknown ioctl 0x%08x\n", cmd);
		return -ENOIOCTLCMD;
	}
}
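
/*
 * Userspace sketch for one of the status ioctls (error handling
 * omitted):
 *
 *	char status[256];	// AP_DEVICES adapters
 *
 *	ioctl(fd, ZCRYPT_STATUS_MASK, status);
 *	// status[i] holds the user space type of online card i,
 *	// 0x0d if the card is offline, 0x00 if there is no card
 */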
#ifdef CONFIG_COMPAT
/*
 * ioctl32 conversion routines
 */
struct compat_ica_rsa_modexpo {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	b_key;
	compat_uptr_t	n_modulus;
};
static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo mex32;
	struct ica_rsa_modexpo mex64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&mex32, umex32, sizeof(mex32)))
		return -EFAULT;
	mex64.inputdata = compat_ptr(mex32.inputdata);
	mex64.inputdatalength = mex32.inputdatalength;
	mex64.outputdata = compat_ptr(mex32.outputdata);
	mex64.outputdatalength = mex32.outputdatalength;
	mex64.b_key = compat_ptr(mex32.b_key);
	mex64.n_modulus = compat_ptr(mex32.n_modulus);
	do {
		rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(mex64.outputdatalength,
			&umex32->outputdatalength);
}
struct compat_ica_rsa_modexpo_crt {
	compat_uptr_t	inputdata;
	unsigned int	inputdatalength;
	compat_uptr_t	outputdata;
	unsigned int	outputdatalength;
	compat_uptr_t	bp_key;
	compat_uptr_t	bq_key;
	compat_uptr_t	np_prime;
	compat_uptr_t	nq_prime;
	compat_uptr_t	u_mult_inv;
};
static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
	struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
	struct compat_ica_rsa_modexpo_crt crt32;
	struct ica_rsa_modexpo_crt crt64;
	struct zcrypt_track tr;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
		return -EFAULT;
	crt64.inputdata = compat_ptr(crt32.inputdata);
	crt64.inputdatalength = crt32.inputdatalength;
	crt64.outputdata = compat_ptr(crt32.outputdata);
	crt64.outputdatalength = crt32.outputdatalength;
	crt64.bp_key = compat_ptr(crt32.bp_key);
	crt64.bq_key = compat_ptr(crt32.bq_key);
	crt64.np_prime = compat_ptr(crt32.np_prime);
	crt64.nq_prime = compat_ptr(crt32.nq_prime);
	crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
	do {
		rc = zcrypt_rsa_crt(perms, &tr, &crt64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = zcrypt_rsa_crt(perms, &tr, &crt64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	if (rc)
		return rc;
	return put_user(crt64.outputdatalength,
			&ucrt32->outputdatalength);
}
struct compat_ica_xcrb {
	unsigned short	agent_ID;
	unsigned int	user_defined;
	unsigned short	request_ID;
	unsigned int	request_control_blk_length;
	unsigned char	padding1[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_control_blk_addr;
	unsigned int	request_data_length;
	char		padding2[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	request_data_address;
	unsigned int	reply_control_blk_length;
	char		padding3[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_control_blk_addr;
	unsigned int	reply_data_length;
	char		padding4[16 - sizeof(compat_uptr_t)];
	compat_uptr_t	reply_data_addr;
	unsigned short	priority_window;
	unsigned int	status;
};
static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
			 unsigned int cmd, unsigned long arg)
{
	struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
	struct compat_ica_xcrb xcrb32;
	struct zcrypt_track tr;
	struct ica_xcRB xcrb64;
	long rc;

	memset(&tr, 0, sizeof(tr));
	if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
		return -EFAULT;
	xcrb64.agent_ID = xcrb32.agent_ID;
	xcrb64.user_defined = xcrb32.user_defined;
	xcrb64.request_ID = xcrb32.request_ID;
	xcrb64.request_control_blk_length =
		xcrb32.request_control_blk_length;
	xcrb64.request_control_blk_addr =
		compat_ptr(xcrb32.request_control_blk_addr);
	xcrb64.request_data_length =
		xcrb32.request_data_length;
	xcrb64.request_data_address =
		compat_ptr(xcrb32.request_data_address);
	xcrb64.reply_control_blk_length =
		xcrb32.reply_control_blk_length;
	xcrb64.reply_control_blk_addr =
		compat_ptr(xcrb32.reply_control_blk_addr);
	xcrb64.reply_data_length = xcrb32.reply_data_length;
	xcrb64.reply_data_addr =
		compat_ptr(xcrb32.reply_data_addr);
	xcrb64.priority_window = xcrb32.priority_window;
	xcrb64.status = xcrb32.status;
	do {
		rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV failure: retry once again after a requested rescan */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;
	xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
	xcrb32.reply_data_length = xcrb64.reply_data_length;
	xcrb32.status = xcrb64.status;
	if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
		return -EFAULT;
	return rc;
}
static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	int rc;
	struct ap_perms *perms =
		(struct ap_perms *)filp->private_data;

	rc = zcrypt_check_ioctl(perms, cmd);
	if (rc)
		return rc;

	if (cmd == ICARSAMODEXPO)
		return trans_modexpo32(perms, filp, cmd, arg);
	if (cmd == ICARSACRT)
		return trans_modexpo_crt32(perms, filp, cmd, arg);
	if (cmd == ZSECSENDCPRB)
		return trans_xcrb32(perms, filp, cmd, arg);
	return zcrypt_unlocked_ioctl(filp, cmd, arg);
}
#endif
/*
 * Misc device file operations.
 */
static const struct file_operations zcrypt_fops = {
	.owner		= THIS_MODULE,
	.read		= zcrypt_read,
	.write		= zcrypt_write,
	.unlocked_ioctl	= zcrypt_unlocked_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= zcrypt_compat_ioctl,
#endif
	.open		= zcrypt_open,
	.release	= zcrypt_release,
};
/*
 * Misc device.
 */
static struct miscdevice zcrypt_misc_device = {
	.minor	    = MISC_DYNAMIC_MINOR,
	.name	    = "z90crypt",
	.fops	    = &zcrypt_fops,
};
static int zcrypt_rng_device_count;
static u32 *zcrypt_rng_buffer;
static int zcrypt_rng_buffer_index;
static DEFINE_MUTEX(zcrypt_rng_mutex);
static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
{
	int rc;

	/*
	 * We don't need locking here because the RNG API guarantees serialized
	 * read method calls.
	 */
	if (zcrypt_rng_buffer_index == 0) {
		rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		/* on ENODEV failure: retry once again after an AP bus rescan */
		if (rc == -ENODEV && zcrypt_process_rescan())
			rc = zcrypt_rng((char *)zcrypt_rng_buffer);
		if (rc < 0)
			return -EIO;
		zcrypt_rng_buffer_index = rc / sizeof(*data);
	}
	*data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
	return sizeof(*data);
}
static struct hwrng zcrypt_rng_dev = {
	.name		= "zcrypt",
	.data_read	= zcrypt_rng_data_read,
	.quality	= 990,
};
int zcrypt_rng_device_add(void)
{
	int rc = 0;

	mutex_lock(&zcrypt_rng_mutex);
	if (zcrypt_rng_device_count == 0) {
		zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
		if (!zcrypt_rng_buffer) {
			rc = -ENOMEM;
			goto out;
		}
		zcrypt_rng_buffer_index = 0;
		rc = hwrng_register(&zcrypt_rng_dev);
		if (rc)
			goto out_free;
		zcrypt_rng_device_count = 1;
	} else {
		zcrypt_rng_device_count++;
	}
	mutex_unlock(&zcrypt_rng_mutex);
	return 0;

out_free:
	free_page((unsigned long)zcrypt_rng_buffer);
out:
	mutex_unlock(&zcrypt_rng_mutex);
	return rc;
}
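
/*
 * Note (sketch): once the first suitable card triggers this
 * registration, the pool is visible through the kernel hwrng core,
 * e.g. as "zcrypt" in /sys/class/misc/hw_random/rng_available and
 * readable via /dev/hwrng.
 */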
void zcrypt_rng_device_remove(void)
{
	mutex_lock(&zcrypt_rng_mutex);
	zcrypt_rng_device_count--;
	if (zcrypt_rng_device_count == 0) {
		hwrng_unregister(&zcrypt_rng_dev);
		free_page((unsigned long)zcrypt_rng_buffer);
	}
	mutex_unlock(&zcrypt_rng_mutex);
}
/*
 * Wait until the zcrypt api is operational.
 * The AP bus scan and the binding of ap devices to device drivers is
 * an asynchronous job. This function waits until these initial jobs
 * are done and so the zcrypt api should be ready to serve crypto
 * requests - if there are resources available. The function uses an
 * internal timeout of 30s. The very first caller will either wait for
 * ap bus bindings complete or the timeout happens. This state will be
 * remembered for further callers which will only be blocked until a
 * decision is made (timeout or bindings complete).
 * On timeout -ETIME is returned, on success the return value is 0.
 */
#define ZCRYPT_WAIT_BINDINGS_COMPLETE_MS 30000

int zcrypt_wait_api_operational(void)
{
	static DEFINE_MUTEX(zcrypt_wait_api_lock);
	static int zcrypt_wait_api_state;
	int rc;

	rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
	if (rc)
		return rc;

	switch (zcrypt_wait_api_state) {
	case 0:
		/* initial state, invoke wait for the ap bus complete */
		rc = ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
		switch (rc) {
		case 0:
			/* ap bus bindings are complete */
			zcrypt_wait_api_state = 1;
			break;
		case -EINTR:
			/* interrupted, go back to caller */
			break;
		case -ETIME:
			/* timeout */
			ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
					__func__);
			zcrypt_wait_api_state = -ETIME;
			break;
		default:
			/* other failure */
			pr_debug("ap_wait_init_apqn_bindings_complete()=%d\n", rc);
			break;
		}
		break;
	case 1:
		/* a previous caller already found ap bus bindings complete */
		rc = 0;
		break;
	default:
		/* a previous caller had timeout or other failure */
		rc = zcrypt_wait_api_state;
		break;
	}

	mutex_unlock(&zcrypt_wait_api_lock);

	return rc;
}
EXPORT_SYMBOL(zcrypt_wait_api_operational);
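
/*
 * Caller sketch for in-kernel users of the zcrypt api:
 *
 *	if (zcrypt_wait_api_operational()) {
 *		// bindings not complete (timeout or signal),
 *		// crypto resources cannot be relied upon yet
 *		return -ENODEV;
 *	}
 */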
int __init zcrypt_debug_init(void)
{
	zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
					 ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
	debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
	debug_set_level(zcrypt_dbf_info, DBF_ERR);

	return 0;
}
void zcrypt_debug_exit(void)
{
	debug_unregister(zcrypt_dbf_info);
}
static int __init zcdn_init(void)
{
	int rc;

	/* create a new class 'zcrypt' */
	rc = class_register(&zcrypt_class);
	if (rc)
		goto out_class_register_failed;

	/* alloc device minor range */
	rc = alloc_chrdev_region(&zcrypt_devt,
				 0, ZCRYPT_MAX_MINOR_NODES,
				 ZCRYPT_NAME);
	if (rc)
		goto out_alloc_chrdev_failed;

	cdev_init(&zcrypt_cdev, &zcrypt_fops);
	zcrypt_cdev.owner = THIS_MODULE;
	rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	if (rc)
		goto out_cdev_add_failed;

	/* need some class specific sysfs attributes */
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create);
	if (rc)
		goto out_class_create_file_1_failed;
	rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy);
	if (rc)
		goto out_class_create_file_2_failed;

	return 0;

out_class_create_file_2_failed:
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
	cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
	class_unregister(&zcrypt_class);
out_class_register_failed:
	return rc;
}
static void zcdn_exit(void)
{
	class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
	class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy);
	zcdn_destroy_all();
	cdev_del(&zcrypt_cdev);
	unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
	class_unregister(&zcrypt_class);
}
/*
 * zcrypt_api_init(): Module initialization.
 *
 * The module initialization code.
 */
int __init zcrypt_api_init(void)
{
	int rc;

	rc = zcrypt_debug_init();
	if (rc)
		goto out;

	rc = zcdn_init();
	if (rc)
		goto out_zcdn_init_failed;

	/* Register the request sprayer. */
	rc = misc_register(&zcrypt_misc_device);
	if (rc < 0)
		goto out_misc_register_failed;

	zcrypt_msgtype6_init();
	zcrypt_msgtype50_init();

	return 0;

out_misc_register_failed:
	zcdn_exit();
out_zcdn_init_failed:
	zcrypt_debug_exit();
out:
	return rc;
}
/*
 * zcrypt_api_exit(): Module termination.
 *
 * The module termination code.
 */
void __exit zcrypt_api_exit(void)
{
	zcdn_exit();
	misc_deregister(&zcrypt_misc_device);
	zcrypt_msgtype6_exit();
	zcrypt_msgtype50_exit();
	zcrypt_ccamisc_exit();
	zcrypt_ep11misc_exit();
	zcrypt_debug_exit();
}
module_init(zcrypt_api_init);
module_exit(zcrypt_api_exit);