1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright IBM Corp. 2001, 2018
4 * Author(s): Robert Burroughs
15 #include <linux/module.h>
16 #include <linux/init.h>
17 #include <linux/interrupt.h>
18 #include <linux/miscdevice.h>
20 #include <linux/compat.h>
21 #include <linux/slab.h>
22 #include <linux/atomic.h>
23 #include <linux/uaccess.h>
24 #include <linux/hw_random.h>
25 #include <linux/debugfs.h>
26 #include <linux/cdev.h>
27 #include <linux/ctype.h>
28 #include <linux/capability.h>
29 #include <asm/debug.h>
31 #define CREATE_TRACE_POINTS
32 #include <asm/trace/zcrypt.h>
34 #include "zcrypt_api.h"
35 #include "zcrypt_debug.h"
37 #include "zcrypt_msgtype6.h"
38 #include "zcrypt_msgtype50.h"
39 #include "zcrypt_ccamisc.h"
40 #include "zcrypt_ep11misc.h"
45 MODULE_AUTHOR("IBM Corporation");
46 MODULE_DESCRIPTION("Cryptographic Coprocessor interface, " \
47 "Copyright IBM Corp. 2001, 2012");
48 MODULE_LICENSE("GPL");
51 * zcrypt tracepoint functions
53 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_req);
54 EXPORT_TRACEPOINT_SYMBOL(s390_zcrypt_rep);
56 DEFINE_SPINLOCK(zcrypt_list_lock);
57 LIST_HEAD(zcrypt_card_list);
59 static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
60 static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
62 atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
63 EXPORT_SYMBOL(zcrypt_rescan_req);
65 static LIST_HEAD(zcrypt_ops_list);
67 /* Zcrypt related debug feature stuff. */
68 debug_info_t *zcrypt_dbf_info;
71 * Process a rescan of the transport layer.
73 * Returns 1 if the rescan has been processed, otherwise 0.
75 static inline int zcrypt_process_rescan(void)
77 if (atomic_read(&zcrypt_rescan_req)) {
78 atomic_set(&zcrypt_rescan_req, 0);
79 atomic_inc(&zcrypt_rescan_count);
80 ap_bus_force_rescan();
81 ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__,
82 atomic_read(&zcrypt_rescan_count));
88 void zcrypt_msgtype_register(struct zcrypt_ops *zops)
90 list_add_tail(&zops->list, &zcrypt_ops_list);
93 void zcrypt_msgtype_unregister(struct zcrypt_ops *zops)
95 list_del_init(&zops->list);
98 struct zcrypt_ops *zcrypt_msgtype(unsigned char *name, int variant)
100 struct zcrypt_ops *zops;
102 list_for_each_entry(zops, &zcrypt_ops_list, list)
103 if (zops->variant == variant &&
104 (!strncmp(zops->name, name, sizeof(zops->name))))
108 EXPORT_SYMBOL(zcrypt_msgtype);
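/*
 * Illustrative sketch (not part of this file): a message type driver
 * registers its ops at init time and the api later looks them up by
 * name and variant. The name and variant below are made up.
 *
 *	static struct zcrypt_ops example_ops = {
 *		.name = "EXAMPLE",
 *		.variant = 0,
 *	};
 *
 *	zcrypt_msgtype_register(&example_ops);
 *	...
 *	zops = zcrypt_msgtype("EXAMPLE", 0);
 *	...
 *	zcrypt_msgtype_unregister(&example_ops);
 */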
111 * Multi device nodes extension functions.
114 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
118 static struct class *zcrypt_class;
119 static dev_t zcrypt_devt;
120 static struct cdev zcrypt_cdev;
122 struct zcdn_device {
123 struct device device;
124 struct ap_perms perms;
127 #define to_zcdn_dev(x) container_of((x), struct zcdn_device, device)
129 #define ZCDN_MAX_NAME 32
131 static int zcdn_create(const char *name);
132 static int zcdn_destroy(const char *name);
135 * Find zcdn device by name.
136 * Returns reference to the zcdn device which needs to be released
137 * with put_device() after use.
139 static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
141 struct device *dev = class_find_device_by_name(zcrypt_class, name);
143 return dev ? to_zcdn_dev(dev) : NULL;
147 * Find zcdn device by devt value.
148 * Returns reference to the zcdn device which needs to be released
149 * with put_device() after use.
151 static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
153 struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
155 return dev ? to_zcdn_dev(dev) : NULL;
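/*
 * Usage sketch for the find_zcdndev_* helpers above (the node name is
 * a made-up example): the returned reference must be dropped with
 * put_device() when the caller is done with the device.
 *
 *	struct zcdn_device *zcdndev;
 *
 *	zcdndev = find_zcdndev_by_name("my_zcdn");
 *	if (zcdndev) {
 *		... use zcdndev->perms ...
 *		put_device(&zcdndev->device);
 *	}
 */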
158 static ssize_t ioctlmask_show(struct device *dev,
159 struct device_attribute *attr,
162 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
165 if (mutex_lock_interruptible(&ap_perms_mutex))
168 n = sysfs_emit(buf, "0x");
169 for (i = 0; i < sizeof(zcdndev->perms.ioctlm) / sizeof(long); i++)
170 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.ioctlm[i]);
171 n += sysfs_emit_at(buf, n, "\n");
173 mutex_unlock(&ap_perms_mutex);
178 static ssize_t ioctlmask_store(struct device *dev,
179 struct device_attribute *attr,
180 const char *buf, size_t count)
183 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
185 rc = ap_parse_mask_str(buf, zcdndev->perms.ioctlm,
186 AP_IOCTLS, &ap_perms_mutex);
193 static DEVICE_ATTR_RW(ioctlmask);
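/*
 * All four mask attributes (ioctlmask above and the ap/aq/admask
 * attributes below) emit the same layout: "0x" followed by the bitmask
 * printed as consecutive 16-digit hex words, leftmost bit first. An
 * ioctlmask with only bit 0 set would read (illustrative):
 *
 *	0x8000000000000000000000000000000000000000000000000000000000000000
 */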
195 static ssize_t apmask_show(struct device *dev,
196 struct device_attribute *attr,
199 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
202 if (mutex_lock_interruptible(&ap_perms_mutex))
205 n = sysfs_emit(buf, "0x");
206 for (i = 0; i < sizeof(zcdndev->perms.apm) / sizeof(long); i++)
207 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.apm[i]);
208 n += sysfs_emit_at(buf, n, "\n");
210 mutex_unlock(&ap_perms_mutex);
215 static ssize_t apmask_store(struct device *dev,
216 struct device_attribute *attr,
217 const char *buf, size_t count)
220 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
222 rc = ap_parse_mask_str(buf, zcdndev->perms.apm,
223 AP_DEVICES, &ap_perms_mutex);
230 static DEVICE_ATTR_RW(apmask);
232 static ssize_t aqmask_show(struct device *dev,
233 struct device_attribute *attr,
236 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
239 if (mutex_lock_interruptible(&ap_perms_mutex))
242 n = sysfs_emit(buf, "0x");
243 for (i = 0; i < sizeof(zcdndev->perms.aqm) / sizeof(long); i++)
244 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.aqm[i]);
245 n += sysfs_emit_at(buf, n, "\n");
247 mutex_unlock(&ap_perms_mutex);
252 static ssize_t aqmask_store(struct device *dev,
253 struct device_attribute *attr,
254 const char *buf, size_t count)
257 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
259 rc = ap_parse_mask_str(buf, zcdndev->perms.aqm,
260 AP_DOMAINS, &ap_perms_mutex);
267 static DEVICE_ATTR_RW(aqmask);
269 static ssize_t admask_show(struct device *dev,
270 struct device_attribute *attr,
273 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
276 if (mutex_lock_interruptible(&ap_perms_mutex))
279 n = sysfs_emit(buf, "0x");
280 for (i = 0; i < sizeof(zcdndev->perms.adm) / sizeof(long); i++)
281 n += sysfs_emit_at(buf, n, "%016lx", zcdndev->perms.adm[i]);
282 n += sysfs_emit_at(buf, n, "\n");
284 mutex_unlock(&ap_perms_mutex);
289 static ssize_t admask_store(struct device *dev,
290 struct device_attribute *attr,
291 const char *buf, size_t count)
294 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
296 rc = ap_parse_mask_str(buf, zcdndev->perms.adm,
297 AP_DOMAINS, &ap_perms_mutex);
304 static DEVICE_ATTR_RW(admask);
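/*
 * Illustrative store usage: the mask strings written from userspace
 * are handed to ap_parse_mask_str(), e.g. (node name made up; see
 * ap_parse_mask_str() for the accepted absolute and relative syntax):
 *
 *	echo 0x8000000000000000000000000000000000000000000000000000000000000000 \
 *		> /sys/class/zcrypt/my_zcdn/apmask
 */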
306 static struct attribute *zcdn_dev_attrs[] = {
307 &dev_attr_ioctlmask.attr,
308 &dev_attr_apmask.attr,
309 &dev_attr_aqmask.attr,
310 &dev_attr_admask.attr,
314 static struct attribute_group zcdn_dev_attr_group = {
315 .attrs = zcdn_dev_attrs
318 static const struct attribute_group *zcdn_dev_attr_groups[] = {
319 &zcdn_dev_attr_group,
323 static ssize_t zcdn_create_store(const struct class *class,
324 const struct class_attribute *attr,
325 const char *buf, size_t count)
328 char name[ZCDN_MAX_NAME];
330 strscpy(name, skip_spaces(buf), sizeof(name));
332 rc = zcdn_create(strim(name));
334 return rc ? rc : count;
337 static const struct class_attribute class_attr_zcdn_create =
338 __ATTR(create, 0600, NULL, zcdn_create_store);
340 static ssize_t zcdn_destroy_store(const struct class *class,
341 const struct class_attribute *attr,
342 const char *buf, size_t count)
345 char name[ZCDN_MAX_NAME];
347 strscpy(name, skip_spaces(buf), sizeof(name));
349 rc = zcdn_destroy(strim(name));
351 return rc ? rc : count;
354 static const struct class_attribute class_attr_zcdn_destroy =
355 __ATTR(destroy, 0600, NULL, zcdn_destroy_store);
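/*
 * Illustrative usage of the two class attributes above (the node name
 * is a made-up example): writing a name to "create" builds a new zcdn
 * device node, writing it to "destroy" removes it again.
 *
 *	echo my_zcdn > /sys/class/zcrypt/create
 *	echo my_zcdn > /sys/class/zcrypt/destroy
 */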
357 static void zcdn_device_release(struct device *dev)
359 struct zcdn_device *zcdndev = to_zcdn_dev(dev);
361 ZCRYPT_DBF_INFO("%s releasing zcdn device %d:%d\n",
362 __func__, MAJOR(dev->devt), MINOR(dev->devt));
367 static int zcdn_create(const char *name)
371 char nodename[ZCDN_MAX_NAME];
372 struct zcdn_device *zcdndev;
374 if (mutex_lock_interruptible(&ap_perms_mutex))
377 /* check if device node with this name already exists */
379 zcdndev = find_zcdndev_by_name(name);
380 if (zcdndev) {
381 put_device(&zcdndev->device);
387 /* find an unused minor number */
388 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
389 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
390 zcdndev = find_zcdndev_by_devt(devt);
391 if (zcdndev)
392 put_device(&zcdndev->device);
396 if (i == ZCRYPT_MAX_MINOR_NODES) {
401 /* alloc and prepare a new zcdn device */
402 zcdndev = kzalloc(sizeof(*zcdndev), GFP_KERNEL);
407 zcdndev->device.release = zcdn_device_release;
408 zcdndev->device.class = zcrypt_class;
409 zcdndev->device.devt = devt;
410 zcdndev->device.groups = zcdn_dev_attr_groups;
411 if (name[0])
412 strncpy(nodename, name, sizeof(nodename));
413 else
414 snprintf(nodename, sizeof(nodename),
415 ZCRYPT_NAME "_%d", (int)MINOR(devt));
416 nodename[sizeof(nodename) - 1] = '\0';
417 if (dev_set_name(&zcdndev->device, nodename)) {
421 rc = device_register(&zcdndev->device);
423 put_device(&zcdndev->device);
427 ZCRYPT_DBF_INFO("%s created zcdn device %d:%d\n",
428 __func__, MAJOR(devt), MINOR(devt));
431 mutex_unlock(&ap_perms_mutex);
435 static int zcdn_destroy(const char *name)
438 struct zcdn_device *zcdndev;
440 if (mutex_lock_interruptible(&ap_perms_mutex))
443 /* try to find this zcdn device */
444 zcdndev = find_zcdndev_by_name(name);
451 * The zcdn device is not hard destroyed. It is subject to
452 * reference counting and thus just needs to be unregistered.
454 put_device(&zcdndev->device);
455 device_unregister(&zcdndev->device);
458 mutex_unlock(&ap_perms_mutex);
462 static void zcdn_destroy_all(void)
466 struct zcdn_device *zcdndev;
468 mutex_lock(&ap_perms_mutex);
469 for (i = 0; i < ZCRYPT_MAX_MINOR_NODES; i++) {
470 devt = MKDEV(MAJOR(zcrypt_devt), MINOR(zcrypt_devt) + i);
471 zcdndev = find_zcdndev_by_devt(devt);
472 if (zcdndev) {
473 put_device(&zcdndev->device);
474 device_unregister(&zcdndev->device);
477 mutex_unlock(&ap_perms_mutex);
483 * zcrypt_read(): Not supported beyond zcrypt 1.3.1.
485 * This function is not supported beyond zcrypt 1.3.1.
487 static ssize_t zcrypt_read(struct file *filp, char __user *buf,
488 size_t count, loff_t *f_pos)
494 * zcrypt_write(): Not allowed.
496 * Write is not allowed.
498 static ssize_t zcrypt_write(struct file *filp, const char __user *buf,
499 size_t count, loff_t *f_pos)
505 * zcrypt_open(): Count number of users.
507 * Device open function to count number of users.
509 static int zcrypt_open(struct inode *inode, struct file *filp)
511 struct ap_perms *perms = &ap_perms;
513 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
514 if (filp->f_inode->i_cdev == &zcrypt_cdev) {
515 struct zcdn_device *zcdndev;
517 if (mutex_lock_interruptible(&ap_perms_mutex))
519 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
520 /* find returns a reference, no get_device() needed */
521 mutex_unlock(&ap_perms_mutex);
522 if (zcdndev)
523 perms = &zcdndev->perms;
526 filp->private_data = (void *)perms;
528 atomic_inc(&zcrypt_open_count);
529 return stream_open(inode, filp);
533 * zcrypt_release(): Count number of users.
535 * Device close function to count number of users.
537 static int zcrypt_release(struct inode *inode, struct file *filp)
539 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
540 if (filp->f_inode->i_cdev == &zcrypt_cdev) {
541 struct zcdn_device *zcdndev;
543 mutex_lock(&ap_perms_mutex);
544 zcdndev = find_zcdndev_by_devt(filp->f_inode->i_rdev);
545 mutex_unlock(&ap_perms_mutex);
546 if (zcdndev) {
547 /* 2 puts here: one for find, one for open */
548 put_device(&zcdndev->device);
549 put_device(&zcdndev->device);
554 atomic_dec(&zcrypt_open_count);
558 static inline int zcrypt_check_ioctl(struct ap_perms *perms,
562 int ioctlnr = (cmd & _IOC_NRMASK) >> _IOC_NRSHIFT;
564 if (ioctlnr > 0 && ioctlnr < AP_IOCTLS) {
565 if (test_bit_inv(ioctlnr, perms->ioctlm))
570 ZCRYPT_DBF_WARN("%s ioctl check failed: ioctlnr=0x%04x rc=%d\n",
571 __func__, ioctlnr, rc);
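/*
 * Example (illustrative): ICARSAMODEXPO is defined in zcrypt.h as
 * _IOC(_IOC_READ | _IOC_WRITE, ZCRYPT_IOCTL_MAGIC, 0x05, 0), so the
 * ioctlnr extracted above would be 0x05 and bit 0x05 of perms->ioctlm
 * decides whether the caller may issue this ioctl.
 */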
576 static inline bool zcrypt_check_card(struct ap_perms *perms, int card)
578 return test_bit_inv(card, perms->apm) ? true : false;
581 static inline bool zcrypt_check_queue(struct ap_perms *perms, int queue)
583 return test_bit_inv(queue, perms->aqm) ? true : false;
586 static inline struct zcrypt_queue *zcrypt_pick_queue(struct zcrypt_card *zc,
587 struct zcrypt_queue *zq,
588 struct module **pmod,
591 if (!zq || !try_module_get(zq->queue->ap_dev.device.driver->owner))
593 zcrypt_queue_get(zq);
594 get_device(&zq->queue->ap_dev.device);
595 atomic_add(weight, &zc->load);
596 atomic_add(weight, &zq->load);
598 *pmod = zq->queue->ap_dev.device.driver->owner;
602 static inline void zcrypt_drop_queue(struct zcrypt_card *zc,
603 struct zcrypt_queue *zq,
608 atomic_sub(weight, &zc->load);
609 atomic_sub(weight, &zq->load);
610 put_device(&zq->queue->ap_dev.device);
611 zcrypt_queue_put(zq);
615 static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
616 struct zcrypt_card *pref_zc,
618 unsigned int pref_weight)
622 weight += atomic_read(&zc->load);
623 pref_weight += atomic_read(&pref_zc->load);
624 if (weight == pref_weight)
625 return atomic64_read(&zc->card->total_request_count) <
626 atomic64_read(&pref_zc->card->total_request_count);
627 return weight < pref_weight;
630 static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
631 struct zcrypt_queue *pref_zq,
633 unsigned int pref_weight)
637 weight += atomic_read(&zq->load);
638 pref_weight += atomic_read(&pref_zq->load);
639 if (weight == pref_weight)
640 return zq->queue->total_request_count <
641 pref_zq->queue->total_request_count;
642 return weight < pref_weight;
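/*
 * Worked example with made-up numbers: a candidate queue with load 40
 * competing at wgt + qpen = 10 yields weight 50; the preferred queue
 * with load 30 at pref_wgt = 15 yields pref_weight 45. 50 < 45 is
 * false, so the preferred queue is kept; on equal weights the queue
 * with the lower total_request_count wins.
 */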
648 static long zcrypt_rsa_modexpo(struct ap_perms *perms,
649 struct zcrypt_track *tr,
650 struct ica_rsa_modexpo *mex)
652 struct zcrypt_card *zc, *pref_zc;
653 struct zcrypt_queue *zq, *pref_zq;
654 struct ap_message ap_msg;
655 unsigned int wgt = 0, pref_wgt = 0;
656 unsigned int func_code;
657 int cpen, qpen, qid = 0, rc = -ENODEV;
660 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
662 ap_init_message(&ap_msg);
664 #ifdef CONFIG_ZCRYPT_DEBUG
665 if (tr && tr->fi.cmd)
666 ap_msg.fi.cmd = tr->fi.cmd;
669 if (mex->outputdatalength < mex->inputdatalength) {
676 * As long as outputdatalength is big enough, we can set the
677 * outputdatalength equal to the inputdatalength, since that is the
678 * number of bytes we will copy in any case
680 mex->outputdatalength = mex->inputdatalength;
682 rc = get_rsa_modex_fc(mex, &func_code);
688 spin_lock(&zcrypt_list_lock);
689 for_each_zcrypt_card(zc) {
690 /* Check for usable accelerator or CCA card */
691 if (!zc->online || !zc->card->config || zc->card->chkstop ||
692 !(zc->card->functions & 0x18000000))
694 /* Check for size limits */
695 if (zc->min_mod_size > mex->inputdatalength ||
696 zc->max_mod_size < mex->inputdatalength)
698 /* check if device node has admission for this card */
699 if (!zcrypt_check_card(perms, zc->card->id))
701 /* get weight index of the card device */
702 wgt = zc->speed_rating[func_code];
703 /* penalty if this msg was previously sent via this card */
704 cpen = (tr && tr->again_counter && tr->last_qid &&
705 AP_QID_CARD(tr->last_qid) == zc->card->id) ?
706 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
707 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
709 for_each_zcrypt_queue(zq, zc) {
710 /* check if device is usable and eligible */
711 if (!zq->online || !zq->ops->rsa_modexpo ||
712 !zq->queue->config || zq->queue->chkstop)
714 /* check if device node has admission for this queue */
715 if (!zcrypt_check_queue(perms,
716 AP_QID_QUEUE(zq->queue->qid)))
718 /* penalty if the msg was previously sent at this qid */
719 qpen = (tr && tr->again_counter && tr->last_qid &&
720 tr->last_qid == zq->queue->qid) ?
721 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
722 if (!zcrypt_queue_compare(zq, pref_zq,
723 wgt + cpen + qpen, pref_wgt))
727 pref_wgt = wgt + cpen + qpen;
730 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
731 spin_unlock(&zcrypt_list_lock);
734 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
740 qid = pref_zq->queue->qid;
741 rc = pref_zq->ops->rsa_modexpo(pref_zq, mex, &ap_msg);
743 spin_lock(&zcrypt_list_lock);
744 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
745 spin_unlock(&zcrypt_list_lock);
748 ap_release_message(&ap_msg);
753 trace_s390_zcrypt_rep(mex, func_code, rc,
754 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
758 static long zcrypt_rsa_crt(struct ap_perms *perms,
759 struct zcrypt_track *tr,
760 struct ica_rsa_modexpo_crt *crt)
762 struct zcrypt_card *zc, *pref_zc;
763 struct zcrypt_queue *zq, *pref_zq;
764 struct ap_message ap_msg;
765 unsigned int wgt = 0, pref_wgt = 0;
766 unsigned int func_code;
767 int cpen, qpen, qid = 0, rc = -ENODEV;
770 trace_s390_zcrypt_req(crt, TP_ICARSACRT);
772 ap_init_message(&ap_msg);
774 #ifdef CONFIG_ZCRYPT_DEBUG
775 if (tr && tr->fi.cmd)
776 ap_msg.fi.cmd = tr->fi.cmd;
779 if (crt->outputdatalength < crt->inputdatalength) {
786 * As long as outputdatalength is big enough, we can set the
787 * outputdatalength equal to the inputdatalength, since that is the
788 * number of bytes we will copy in any case
790 crt->outputdatalength = crt->inputdatalength;
792 rc = get_rsa_crt_fc(crt, &func_code);
798 spin_lock(&zcrypt_list_lock);
799 for_each_zcrypt_card(zc) {
800 /* Check for usable accelerator or CCA card */
801 if (!zc->online || !zc->card->config || zc->card->chkstop ||
802 !(zc->card->functions & 0x18000000))
804 /* Check for size limits */
805 if (zc->min_mod_size > crt->inputdatalength ||
806 zc->max_mod_size < crt->inputdatalength)
808 /* check if device node has admission for this card */
809 if (!zcrypt_check_card(perms, zc->card->id))
811 /* get weight index of the card device */
812 wgt = zc->speed_rating[func_code];
813 /* penalty if this msg was previously sent via this card */
814 cpen = (tr && tr->again_counter && tr->last_qid &&
815 AP_QID_CARD(tr->last_qid) == zc->card->id) ?
816 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
817 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
819 for_each_zcrypt_queue(zq, zc) {
820 /* check if device is usable and eligible */
821 if (!zq->online || !zq->ops->rsa_modexpo_crt ||
822 !zq->queue->config || zq->queue->chkstop)
824 /* check if device node has admission for this queue */
825 if (!zcrypt_check_queue(perms,
826 AP_QID_QUEUE(zq->queue->qid)))
828 /* penalty if the msg was previously sent at this qid */
829 qpen = (tr && tr->again_counter && tr->last_qid &&
830 tr->last_qid == zq->queue->qid) ?
831 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
832 if (!zcrypt_queue_compare(zq, pref_zq,
833 wgt + cpen + qpen, pref_wgt))
837 pref_wgt = wgt + cpen + qpen;
840 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
841 spin_unlock(&zcrypt_list_lock);
844 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
850 qid = pref_zq->queue->qid;
851 rc = pref_zq->ops->rsa_modexpo_crt(pref_zq, crt, &ap_msg);
853 spin_lock(&zcrypt_list_lock);
854 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
855 spin_unlock(&zcrypt_list_lock);
858 ap_release_message(&ap_msg);
863 trace_s390_zcrypt_rep(crt, func_code, rc,
864 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
868 static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
869 struct zcrypt_track *tr,
870 struct ica_xcRB *xcrb)
872 struct zcrypt_card *zc, *pref_zc;
873 struct zcrypt_queue *zq, *pref_zq;
874 struct ap_message ap_msg;
875 unsigned int wgt = 0, pref_wgt = 0;
876 unsigned int func_code;
877 unsigned short *domain, tdom;
878 int cpen, qpen, qid = 0, rc = -ENODEV;
881 trace_s390_zcrypt_req(xcrb, TB_ZSECSENDCPRB);
884 ap_init_message(&ap_msg);
886 #ifdef CONFIG_ZCRYPT_DEBUG
887 if (tr && tr->fi.cmd)
888 ap_msg.fi.cmd = tr->fi.cmd;
889 if (tr && tr->fi.action == AP_FI_ACTION_CCA_AGENT_FF) {
890 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid agent_ID 'FF'\n",
891 __func__, tr->fi.cmd);
892 xcrb->agent_ID = 0x4646;
896 rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
900 tdom = *domain;
901 if (perms != &ap_perms && tdom < AP_DOMAINS) {
902 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
903 if (!test_bit_inv(tdom, perms->adm)) {
907 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
913 * If a valid target domain is set and this domain is NOT a usage
914 * domain but a control only domain, autoselect target domain.
916 if (tdom < AP_DOMAINS &&
917 !ap_test_config_usage_domain(tdom) &&
918 ap_test_config_ctrl_domain(tdom))
923 spin_lock(&zcrypt_list_lock);
924 for_each_zcrypt_card(zc) {
925 /* Check for usable CCA card */
926 if (!zc->online || !zc->card->config || zc->card->chkstop ||
927 !(zc->card->functions & 0x10000000))
929 /* Check for user selected CCA card */
930 if (xcrb->user_defined != AUTOSELECT &&
931 xcrb->user_defined != zc->card->id)
933 /* check if request size exceeds card max msg size */
934 if (ap_msg.len > zc->card->maxmsgsize)
936 /* check if device node has admission for this card */
937 if (!zcrypt_check_card(perms, zc->card->id))
939 /* get weight index of the card device */
940 wgt = speed_idx_cca(func_code) * zc->speed_rating[SECKEY];
941 /* penalty if this msg was previously sent via this card */
942 cpen = (tr && tr->again_counter && tr->last_qid &&
943 AP_QID_CARD(tr->last_qid) == zc->card->id) ?
944 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
945 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
947 for_each_zcrypt_queue(zq, zc) {
948 /* check for device usable and eligible */
949 if (!zq->online || !zq->ops->send_cprb ||
950 !zq->queue->config || zq->queue->chkstop ||
951 (tdom != AUTOSEL_DOM &&
952 tdom != AP_QID_QUEUE(zq->queue->qid)))
954 /* check if device node has admission for this queue */
955 if (!zcrypt_check_queue(perms,
956 AP_QID_QUEUE(zq->queue->qid)))
958 /* penalty if the msg was previously sent at this qid */
959 qpen = (tr && tr->again_counter && tr->last_qid &&
960 tr->last_qid == zq->queue->qid) ?
961 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
962 if (!zcrypt_queue_compare(zq, pref_zq,
963 wgt + cpen + qpen, pref_wgt))
967 pref_wgt = wgt + cpen + qpen;
970 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
971 spin_unlock(&zcrypt_list_lock);
974 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
975 __func__, xcrb->user_defined, *domain);
980 /* in case of auto select, provide the correct domain */
981 qid = pref_zq->queue->qid;
982 if (*domain == AUTOSEL_DOM)
983 *domain = AP_QID_QUEUE(qid);
985 #ifdef CONFIG_ZCRYPT_DEBUG
986 if (tr && tr->fi.action == AP_FI_ACTION_CCA_DOM_INVAL) {
987 ZCRYPT_DBF_WARN("%s fi cmd 0x%04x: forcing invalid domain\n",
988 __func__, tr->fi.cmd);
993 rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
995 spin_lock(&zcrypt_list_lock);
996 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
997 spin_unlock(&zcrypt_list_lock);
1000 ap_release_message(&ap_msg);
1005 trace_s390_zcrypt_rep(xcrb, func_code, rc,
1006 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1010 long zcrypt_send_cprb(struct ica_xcRB *xcrb)
1012 return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb);
1014 EXPORT_SYMBOL(zcrypt_send_cprb);
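/*
 * Minimal in-kernel usage sketch (illustrative; a real caller builds
 * complete CPRB request and reply buffers first):
 *
 *	struct ica_xcRB xcrb;
 *	long rc;
 *
 *	memset(&xcrb, 0, sizeof(xcrb));
 *	xcrb.user_defined = AUTOSELECT;
 *	... set request/reply control block and data pointers ...
 *	rc = zcrypt_send_cprb(&xcrb);
 */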
1016 static bool is_desired_ep11_card(unsigned int dev_id,
1017 unsigned short target_num,
1018 struct ep11_target_dev *targets)
1020 while (target_num-- > 0) {
1021 if (targets->ap_id == dev_id || targets->ap_id == AUTOSEL_AP)
1028 static bool is_desired_ep11_queue(unsigned int dev_qid,
1029 unsigned short target_num,
1030 struct ep11_target_dev *targets)
1032 int card = AP_QID_CARD(dev_qid), dom = AP_QID_QUEUE(dev_qid);
1034 while (target_num-- > 0) {
1035 if ((targets->ap_id == card || targets->ap_id == AUTOSEL_AP) &&
1036 (targets->dom_id == dom || targets->dom_id == AUTOSEL_DOM))
1043 static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
1044 struct zcrypt_track *tr,
1045 struct ep11_urb *xcrb)
1047 struct zcrypt_card *zc, *pref_zc;
1048 struct zcrypt_queue *zq, *pref_zq;
1049 struct ep11_target_dev *targets;
1050 unsigned short target_num;
1051 unsigned int wgt = 0, pref_wgt = 0;
1052 unsigned int func_code, domain;
1053 struct ap_message ap_msg;
1054 int cpen, qpen, qid = 0, rc = -ENODEV;
1057 trace_s390_zcrypt_req(xcrb, TP_ZSENDEP11CPRB);
1059 ap_init_message(&ap_msg);
1061 #ifdef CONFIG_ZCRYPT_DEBUG
1062 if (tr && tr->fi.cmd)
1063 ap_msg.fi.cmd = tr->fi.cmd;
1066 target_num = (unsigned short)xcrb->targets_num;
1068 /* empty list indicates autoselect (all available targets) */
1070 if (target_num != 0) {
1071 struct ep11_target_dev __user *uptr;
1073 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
1080 uptr = (struct ep11_target_dev __force __user *)xcrb->targets;
1081 if (z_copy_from_user(userspace, targets, uptr,
1082 target_num * sizeof(*targets))) {
1089 rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
1093 if (perms != &ap_perms && domain < AUTOSEL_DOM) {
1094 if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
1095 if (!test_bit_inv(domain, perms->adm)) {
1099 } else if ((ap_msg.flags & AP_MSG_FLAG_USAGE) == 0) {
1107 spin_lock(&zcrypt_list_lock);
1108 for_each_zcrypt_card(zc) {
1109 /* Check for usable EP11 card */
1110 if (!zc->online || !zc->card->config || zc->card->chkstop ||
1111 !(zc->card->functions & 0x04000000))
1113 /* Check for user selected EP11 card */
1114 if (targets &&
1115 !is_desired_ep11_card(zc->card->id, target_num, targets))
1117 /* check if request size exceeds card max msg size */
1118 if (ap_msg.len > zc->card->maxmsgsize)
1120 /* check if device node has admission for this card */
1121 if (!zcrypt_check_card(perms, zc->card->id))
1123 /* get weight index of the card device */
1124 wgt = speed_idx_ep11(func_code) * zc->speed_rating[SECKEY];
1125 /* penalty if this msg was previously sent via this card */
1126 cpen = (tr && tr->again_counter && tr->last_qid &&
1127 AP_QID_CARD(tr->last_qid) == zc->card->id) ?
1128 TRACK_AGAIN_CARD_WEIGHT_PENALTY : 0;
1129 if (!zcrypt_card_compare(zc, pref_zc, wgt + cpen, pref_wgt))
1131 for_each_zcrypt_queue(zq, zc) {
1132 /* check if device is usable and eligible */
1133 if (!zq->online || !zq->ops->send_ep11_cprb ||
1134 !zq->queue->config || zq->queue->chkstop ||
1135 (targets &&
1136 !is_desired_ep11_queue(zq->queue->qid,
1137 target_num, targets)))
1139 /* check if device node has admission for this queue */
1140 if (!zcrypt_check_queue(perms,
1141 AP_QID_QUEUE(zq->queue->qid)))
1143 /* penalty if the msg was previously sent at this qid */
1144 qpen = (tr && tr->again_counter && tr->last_qid &&
1145 tr->last_qid == zq->queue->qid) ?
1146 TRACK_AGAIN_QUEUE_WEIGHT_PENALTY : 0;
1147 if (!zcrypt_queue_compare(zq, pref_zq,
1148 wgt + cpen + qpen, pref_wgt))
1152 pref_wgt = wgt + cpen + qpen;
1155 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1156 spin_unlock(&zcrypt_list_lock);
1159 if (targets && target_num == 1) {
1160 ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
1161 __func__, (int)targets->ap_id,
1162 (int)targets->dom_id);
1163 } else if (targets) {
1164 ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n",
1165 __func__, (int)target_num);
1167 ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n",
1174 qid = pref_zq->queue->qid;
1175 rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
1177 spin_lock(&zcrypt_list_lock);
1178 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1179 spin_unlock(&zcrypt_list_lock);
1184 ap_release_message(&ap_msg);
1189 trace_s390_zcrypt_rep(xcrb, func_code, rc,
1190 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1194 long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
1196 return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
1198 EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
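/*
 * Illustrative sketch (values made up): addressing one EP11 APQN via
 * the target list. An empty list (targets_num == 0) means autoselect
 * among all available EP11 targets.
 *
 *	struct ep11_target_dev tgt = { .ap_id = 5, .dom_id = 13 };
 *	struct ep11_urb urb;
 *
 *	memset(&urb, 0, sizeof(urb));
 *	urb.targets_num = 1;
 *	urb.targets = (u64)(unsigned long)&tgt;
 *	... set req/resp buffers in urb ...
 *	rc = zcrypt_send_ep11_cprb(&urb);
 */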
1200 static long zcrypt_rng(char *buffer)
1202 struct zcrypt_card *zc, *pref_zc;
1203 struct zcrypt_queue *zq, *pref_zq;
1204 unsigned int wgt = 0, pref_wgt = 0;
1205 unsigned int func_code;
1206 struct ap_message ap_msg;
1207 unsigned int domain;
1208 int qid = 0, rc = -ENODEV;
1211 trace_s390_zcrypt_req(buffer, TP_HWRNGCPRB);
1213 ap_init_message(&ap_msg);
1214 rc = prep_rng_ap_msg(&ap_msg, &func_code, &domain);
1220 spin_lock(&zcrypt_list_lock);
1221 for_each_zcrypt_card(zc) {
1222 /* Check for usable CCA card */
1223 if (!zc->online || !zc->card->config || zc->card->chkstop ||
1224 !(zc->card->functions & 0x10000000))
1226 /* get weight index of the card device */
1227 wgt = zc->speed_rating[func_code];
1228 if (!zcrypt_card_compare(zc, pref_zc, wgt, pref_wgt))
1230 for_each_zcrypt_queue(zq, zc) {
1231 /* check if device is usable and eligible */
1232 if (!zq->online || !zq->ops->rng ||
1233 !zq->queue->config || zq->queue->chkstop)
1235 if (!zcrypt_queue_compare(zq, pref_zq, wgt, pref_wgt))
1242 pref_zq = zcrypt_pick_queue(pref_zc, pref_zq, &mod, wgt);
1243 spin_unlock(&zcrypt_list_lock);
1246 ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
1252 qid = pref_zq->queue->qid;
1253 rc = pref_zq->ops->rng(pref_zq, buffer, &ap_msg);
1255 spin_lock(&zcrypt_list_lock);
1256 zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
1257 spin_unlock(&zcrypt_list_lock);
1260 ap_release_message(&ap_msg);
1261 trace_s390_zcrypt_rep(buffer, func_code, rc,
1262 AP_QID_CARD(qid), AP_QID_QUEUE(qid));
1266 static void zcrypt_device_status_mask(struct zcrypt_device_status *devstatus)
1268 struct zcrypt_card *zc;
1269 struct zcrypt_queue *zq;
1270 struct zcrypt_device_status *stat;
1273 memset(devstatus, 0, MAX_ZDEV_ENTRIES
1274 * sizeof(struct zcrypt_device_status));
1276 spin_lock(&zcrypt_list_lock);
1277 for_each_zcrypt_card(zc) {
1278 for_each_zcrypt_queue(zq, zc) {
1279 card = AP_QID_CARD(zq->queue->qid);
1280 if (card >= MAX_ZDEV_CARDIDS)
1282 queue = AP_QID_QUEUE(zq->queue->qid);
1283 stat = &devstatus[card * AP_DOMAINS + queue];
1284 stat->hwtype = zc->card->ap_dev.device_type;
1285 stat->functions = zc->card->functions >> 26;
1286 stat->qid = zq->queue->qid;
1287 stat->online = zq->online ? 0x01 : 0x00;
1290 spin_unlock(&zcrypt_list_lock);
1293 void zcrypt_device_status_mask_ext(struct zcrypt_device_status_ext *devstatus)
1295 struct zcrypt_card *zc;
1296 struct zcrypt_queue *zq;
1297 struct zcrypt_device_status_ext *stat;
1300 memset(devstatus, 0, MAX_ZDEV_ENTRIES_EXT
1301 * sizeof(struct zcrypt_device_status_ext));
1303 spin_lock(&zcrypt_list_lock);
1304 for_each_zcrypt_card(zc) {
1305 for_each_zcrypt_queue(zq, zc) {
1306 card = AP_QID_CARD(zq->queue->qid);
1307 queue = AP_QID_QUEUE(zq->queue->qid);
1308 stat = &devstatus[card * AP_DOMAINS + queue];
1309 stat->hwtype = zc->card->ap_dev.device_type;
1310 stat->functions = zc->card->functions >> 26;
1311 stat->qid = zq->queue->qid;
1312 stat->online = zq->online ? 0x01 : 0x00;
1315 spin_unlock(&zcrypt_list_lock);
1317 EXPORT_SYMBOL(zcrypt_device_status_mask_ext);
1319 int zcrypt_device_status_ext(int card, int queue,
1320 struct zcrypt_device_status_ext *devstat)
1322 struct zcrypt_card *zc;
1323 struct zcrypt_queue *zq;
1325 memset(devstat, 0, sizeof(*devstat));
1327 spin_lock(&zcrypt_list_lock);
1328 for_each_zcrypt_card(zc) {
1329 for_each_zcrypt_queue(zq, zc) {
1330 if (card == AP_QID_CARD(zq->queue->qid) &&
1331 queue == AP_QID_QUEUE(zq->queue->qid)) {
1332 devstat->hwtype = zc->card->ap_dev.device_type;
1333 devstat->functions = zc->card->functions >> 26;
1334 devstat->qid = zq->queue->qid;
1335 devstat->online = zq->online ? 0x01 : 0x00;
1336 spin_unlock(&zcrypt_list_lock);
1341 spin_unlock(&zcrypt_list_lock);
1345 EXPORT_SYMBOL(zcrypt_device_status_ext);
1347 static void zcrypt_status_mask(char status[], size_t max_adapters)
1349 struct zcrypt_card *zc;
1350 struct zcrypt_queue *zq;
1353 memset(status, 0, max_adapters);
1354 spin_lock(&zcrypt_list_lock);
1355 for_each_zcrypt_card(zc) {
1356 for_each_zcrypt_queue(zq, zc) {
1357 card = AP_QID_CARD(zq->queue->qid);
1358 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1359 card >= max_adapters)
1361 status[card] = zc->online ? zc->user_space_type : 0x0d;
1364 spin_unlock(&zcrypt_list_lock);
1367 static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
1369 struct zcrypt_card *zc;
1370 struct zcrypt_queue *zq;
1373 memset(qdepth, 0, max_adapters);
1374 spin_lock(&zcrypt_list_lock);
1376 for_each_zcrypt_card(zc) {
1377 for_each_zcrypt_queue(zq, zc) {
1378 card = AP_QID_CARD(zq->queue->qid);
1379 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1380 card >= max_adapters)
1382 spin_lock(&zq->queue->lock);
1383 qdepth[card] =
1384 zq->queue->pendingq_count +
1385 zq->queue->requestq_count;
1386 spin_unlock(&zq->queue->lock);
1390 spin_unlock(&zcrypt_list_lock);
1393 static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
1395 struct zcrypt_card *zc;
1396 struct zcrypt_queue *zq;
1400 memset(reqcnt, 0, sizeof(u32) * max_adapters);
1401 spin_lock(&zcrypt_list_lock);
1403 for_each_zcrypt_card(zc) {
1404 for_each_zcrypt_queue(zq, zc) {
1405 card = AP_QID_CARD(zq->queue->qid);
1406 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index ||
1407 card >= max_adapters)
1409 spin_lock(&zq->queue->lock);
1410 cnt = zq->queue->total_request_count;
1411 spin_unlock(&zq->queue->lock);
1412 reqcnt[card] = (cnt < UINT_MAX) ? (u32)cnt : UINT_MAX;
1416 spin_unlock(&zcrypt_list_lock);
1419 static int zcrypt_pendingq_count(void)
1421 struct zcrypt_card *zc;
1422 struct zcrypt_queue *zq;
1426 spin_lock(&zcrypt_list_lock);
1428 for_each_zcrypt_card(zc) {
1429 for_each_zcrypt_queue(zq, zc) {
1430 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1432 spin_lock(&zq->queue->lock);
1433 pendingq_count += zq->queue->pendingq_count;
1434 spin_unlock(&zq->queue->lock);
1438 spin_unlock(&zcrypt_list_lock);
1439 return pendingq_count;
1442 static int zcrypt_requestq_count(void)
1444 struct zcrypt_card *zc;
1445 struct zcrypt_queue *zq;
1449 spin_lock(&zcrypt_list_lock);
1451 for_each_zcrypt_card(zc) {
1452 for_each_zcrypt_queue(zq, zc) {
1453 if (AP_QID_QUEUE(zq->queue->qid) != ap_domain_index)
1455 spin_lock(&zq->queue->lock);
1456 requestq_count += zq->queue->requestq_count;
1457 spin_unlock(&zq->queue->lock);
1461 spin_unlock(&zcrypt_list_lock);
1462 return requestq_count;
1465 static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
1468 struct zcrypt_track tr;
1469 struct ica_rsa_modexpo mex;
1470 struct ica_rsa_modexpo __user *umex = (void __user *)arg;
1472 memset(&tr, 0, sizeof(tr));
1473 if (copy_from_user(&mex, umex, sizeof(mex)))
1476 #ifdef CONFIG_ZCRYPT_DEBUG
1477 if (mex.inputdatalength & (1U << 31)) {
1478 if (!capable(CAP_SYS_ADMIN))
1480 tr.fi.cmd = (u16)(mex.inputdatalength >> 16);
1482 mex.inputdatalength &= 0x0000FFFF;
1486 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1489 #ifdef CONFIG_ZCRYPT_DEBUG
1490 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1493 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1494 /* on failure: retry once again after a requested rescan */
1495 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1497 rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
1500 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1501 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1504 ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc);
1507 return put_user(mex.outputdatalength, &umex->outputdatalength);
1510 static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
1513 struct zcrypt_track tr;
1514 struct ica_rsa_modexpo_crt crt;
1515 struct ica_rsa_modexpo_crt __user *ucrt = (void __user *)arg;
1517 memset(&tr, 0, sizeof(tr));
1518 if (copy_from_user(&crt, ucrt, sizeof(crt)))
1521 #ifdef CONFIG_ZCRYPT_DEBUG
1522 if (crt.inputdatalength & (1U << 31)) {
1523 if (!capable(CAP_SYS_ADMIN))
1525 tr.fi.cmd = (u16)(crt.inputdatalength >> 16);
1527 crt.inputdatalength &= 0x0000FFFF;
1531 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1534 #ifdef CONFIG_ZCRYPT_DEBUG
1535 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1538 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1539 /* on failure: retry once again after a requested rescan */
1540 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1542 rc = zcrypt_rsa_crt(perms, &tr, &crt);
1545 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1546 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1549 ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc);
1552 return put_user(crt.outputdatalength, &ucrt->outputdatalength);
1555 static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
1558 struct ica_xcRB xcrb;
1559 struct zcrypt_track tr;
1560 struct ica_xcRB __user *uxcrb = (void __user *)arg;
1562 memset(&tr, 0, sizeof(tr));
1563 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1566 #ifdef CONFIG_ZCRYPT_DEBUG
1567 if ((xcrb.status & 0x8000FFFF) == 0x80004649 /* 'FI' */) {
1568 if (!capable(CAP_SYS_ADMIN))
1570 tr.fi.cmd = (u16)(xcrb.status >> 16);
1576 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
1579 #ifdef CONFIG_ZCRYPT_DEBUG
1580 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1583 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1584 /* on failure: retry once again after a requested rescan */
1585 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1587 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
1590 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1591 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1594 ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n",
1596 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1601 static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
1604 struct ep11_urb xcrb;
1605 struct zcrypt_track tr;
1606 struct ep11_urb __user *uxcrb = (void __user *)arg;
1608 memset(&tr, 0, sizeof(tr));
1609 if (copy_from_user(&xcrb, uxcrb, sizeof(xcrb)))
1612 #ifdef CONFIG_ZCRYPT_DEBUG
1613 if (xcrb.req_len & (1ULL << 63)) {
1614 if (!capable(CAP_SYS_ADMIN))
1616 tr.fi.cmd = (u16)(xcrb.req_len >> 48);
1618 xcrb.req_len &= 0x0000FFFFFFFFFFFFULL;
1622 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1625 #ifdef CONFIG_ZCRYPT_DEBUG
1626 if (rc == -EAGAIN && (tr.fi.flags & AP_FI_FLAG_NO_RETRY))
1629 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1630 /* on failure: retry once again after a requested rescan */
1631 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1633 rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
1636 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1637 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1640 ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc);
1641 if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
1646 static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
1650 struct ap_perms *perms =
1651 (struct ap_perms *)filp->private_data;
1653 rc = zcrypt_check_ioctl(perms, cmd);
1659 return icarsamodexpo_ioctl(perms, arg);
1661 return icarsacrt_ioctl(perms, arg);
1663 return zsecsendcprb_ioctl(perms, arg);
1665 return zsendep11cprb_ioctl(perms, arg);
1666 case ZCRYPT_DEVICE_STATUS: {
1667 struct zcrypt_device_status_ext *device_status;
1668 size_t total_size = MAX_ZDEV_ENTRIES_EXT
1669 * sizeof(struct zcrypt_device_status_ext);
1671 device_status = kzalloc(total_size, GFP_KERNEL);
1674 zcrypt_device_status_mask_ext(device_status);
1675 if (copy_to_user((char __user *)arg, device_status,
1678 kfree(device_status);
1681 case ZCRYPT_STATUS_MASK: {
1682 char status[AP_DEVICES];
1684 zcrypt_status_mask(status, AP_DEVICES);
1685 if (copy_to_user((char __user *)arg, status, sizeof(status)))
1689 case ZCRYPT_QDEPTH_MASK: {
1690 char qdepth[AP_DEVICES];
1692 zcrypt_qdepth_mask(qdepth, AP_DEVICES);
1693 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
1697 case ZCRYPT_PERDEV_REQCNT: {
1700 reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
1703 zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
1704 if (copy_to_user((int __user *)arg, reqcnt,
1705 sizeof(u32) * AP_DEVICES))
1710 case Z90STAT_REQUESTQ_COUNT:
1711 return put_user(zcrypt_requestq_count(), (int __user *)arg);
1712 case Z90STAT_PENDINGQ_COUNT:
1713 return put_user(zcrypt_pendingq_count(), (int __user *)arg);
1714 case Z90STAT_TOTALOPEN_COUNT:
1715 return put_user(atomic_read(&zcrypt_open_count),
1717 case Z90STAT_DOMAIN_INDEX:
1718 return put_user(ap_domain_index, (int __user *)arg);
1722 case ZDEVICESTATUS: {
1723 /* the old ioctl supports only 64 adapters */
1724 struct zcrypt_device_status *device_status;
1725 size_t total_size = MAX_ZDEV_ENTRIES
1726 * sizeof(struct zcrypt_device_status);
1728 device_status = kzalloc(total_size, GFP_KERNEL);
1731 zcrypt_device_status_mask(device_status);
1732 if (copy_to_user((char __user *)arg, device_status,
1735 kfree(device_status);
1738 case Z90STAT_STATUS_MASK: {
1739 /* the old ioctl supports only 64 adapters */
1740 char status[MAX_ZDEV_CARDIDS];
1742 zcrypt_status_mask(status, MAX_ZDEV_CARDIDS);
1743 if (copy_to_user((char __user *)arg, status, sizeof(status)))
1747 case Z90STAT_QDEPTH_MASK: {
1748 /* the old ioctl supports only 64 adapters */
1749 char qdepth[MAX_ZDEV_CARDIDS];
1751 zcrypt_qdepth_mask(qdepth, MAX_ZDEV_CARDIDS);
1752 if (copy_to_user((char __user *)arg, qdepth, sizeof(qdepth)))
1756 case Z90STAT_PERDEV_REQCNT: {
1757 /* the old ioctl supports only 64 adapters */
1758 u32 reqcnt[MAX_ZDEV_CARDIDS];
1760 zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
1761 if (copy_to_user((int __user *)arg, reqcnt, sizeof(reqcnt)))
1765 /* unknown ioctl number */
1767 ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd);
1768 return -ENOIOCTLCMD;
1772 #ifdef CONFIG_COMPAT
1774 * ioctl32 conversion routines
1776 struct compat_ica_rsa_modexpo {
1777 compat_uptr_t inputdata;
1778 unsigned int inputdatalength;
1779 compat_uptr_t outputdata;
1780 unsigned int outputdatalength;
1781 compat_uptr_t b_key;
1782 compat_uptr_t n_modulus;
1785 static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
1786 unsigned int cmd, unsigned long arg)
1788 struct compat_ica_rsa_modexpo __user *umex32 = compat_ptr(arg);
1789 struct compat_ica_rsa_modexpo mex32;
1790 struct ica_rsa_modexpo mex64;
1791 struct zcrypt_track tr;
1794 memset(&tr, 0, sizeof(tr));
1795 if (copy_from_user(&mex32, umex32, sizeof(mex32)))
1797 mex64.inputdata = compat_ptr(mex32.inputdata);
1798 mex64.inputdatalength = mex32.inputdatalength;
1799 mex64.outputdata = compat_ptr(mex32.outputdata);
1800 mex64.outputdatalength = mex32.outputdatalength;
1801 mex64.b_key = compat_ptr(mex32.b_key);
1802 mex64.n_modulus = compat_ptr(mex32.n_modulus);
1804 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1807 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1808 /* on failure: retry once again after a requested rescan */
1809 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1811 rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
1814 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1815 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1819 return put_user(mex64.outputdatalength,
1820 &umex32->outputdatalength);
1823 struct compat_ica_rsa_modexpo_crt {
1824 compat_uptr_t inputdata;
1825 unsigned int inputdatalength;
1826 compat_uptr_t outputdata;
1827 unsigned int outputdatalength;
1828 compat_uptr_t bp_key;
1829 compat_uptr_t bq_key;
1830 compat_uptr_t np_prime;
1831 compat_uptr_t nq_prime;
1832 compat_uptr_t u_mult_inv;
1835 static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
1836 unsigned int cmd, unsigned long arg)
1838 struct compat_ica_rsa_modexpo_crt __user *ucrt32 = compat_ptr(arg);
1839 struct compat_ica_rsa_modexpo_crt crt32;
1840 struct ica_rsa_modexpo_crt crt64;
1841 struct zcrypt_track tr;
1844 memset(&tr, 0, sizeof(tr));
1845 if (copy_from_user(&crt32, ucrt32, sizeof(crt32)))
1847 crt64.inputdata = compat_ptr(crt32.inputdata);
1848 crt64.inputdatalength = crt32.inputdatalength;
1849 crt64.outputdata = compat_ptr(crt32.outputdata);
1850 crt64.outputdatalength = crt32.outputdatalength;
1851 crt64.bp_key = compat_ptr(crt32.bp_key);
1852 crt64.bq_key = compat_ptr(crt32.bq_key);
1853 crt64.np_prime = compat_ptr(crt32.np_prime);
1854 crt64.nq_prime = compat_ptr(crt32.nq_prime);
1855 crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
1857 rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1860 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1861 /* on failure: retry once again after a requested rescan */
1862 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1864 rc = zcrypt_rsa_crt(perms, &tr, &crt64);
1867 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1868 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1872 return put_user(crt64.outputdatalength,
1873 &ucrt32->outputdatalength);
1876 struct compat_ica_xcrb {
1877 unsigned short agent_ID;
1878 unsigned int user_defined;
1879 unsigned short request_ID;
1880 unsigned int request_control_blk_length;
1881 unsigned char padding1[16 - sizeof(compat_uptr_t)];
1882 compat_uptr_t request_control_blk_addr;
1883 unsigned int request_data_length;
1884 char padding2[16 - sizeof(compat_uptr_t)];
1885 compat_uptr_t request_data_address;
1886 unsigned int reply_control_blk_length;
1887 char padding3[16 - sizeof(compat_uptr_t)];
1888 compat_uptr_t reply_control_blk_addr;
1889 unsigned int reply_data_length;
1890 char padding4[16 - sizeof(compat_uptr_t)];
1891 compat_uptr_t reply_data_addr;
1892 unsigned short priority_window;
1893 unsigned int status;
1896 static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
1897 unsigned int cmd, unsigned long arg)
1899 struct compat_ica_xcrb __user *uxcrb32 = compat_ptr(arg);
1900 struct compat_ica_xcrb xcrb32;
1901 struct zcrypt_track tr;
1902 struct ica_xcRB xcrb64;
1905 memset(&tr, 0, sizeof(tr));
1906 if (copy_from_user(&xcrb32, uxcrb32, sizeof(xcrb32)))
1908 xcrb64.agent_ID = xcrb32.agent_ID;
1909 xcrb64.user_defined = xcrb32.user_defined;
1910 xcrb64.request_ID = xcrb32.request_ID;
1911 xcrb64.request_control_blk_length =
1912 xcrb32.request_control_blk_length;
1913 xcrb64.request_control_blk_addr =
1914 compat_ptr(xcrb32.request_control_blk_addr);
1915 xcrb64.request_data_length =
1916 xcrb32.request_data_length;
1917 xcrb64.request_data_address =
1918 compat_ptr(xcrb32.request_data_address);
1919 xcrb64.reply_control_blk_length =
1920 xcrb32.reply_control_blk_length;
1921 xcrb64.reply_control_blk_addr =
1922 compat_ptr(xcrb32.reply_control_blk_addr);
1923 xcrb64.reply_data_length = xcrb32.reply_data_length;
1924 xcrb64.reply_data_addr =
1925 compat_ptr(xcrb32.reply_data_addr);
1926 xcrb64.priority_window = xcrb32.priority_window;
1927 xcrb64.status = xcrb32.status;
1929 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
1932 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1933 /* on failure: retry once again after a requested rescan */
1934 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
1936 rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
1939 } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
1940 if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
1942 xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
1943 xcrb32.reply_data_length = xcrb64.reply_data_length;
1944 xcrb32.status = xcrb64.status;
1945 if (copy_to_user(uxcrb32, &xcrb32, sizeof(xcrb32)))
1950 static long zcrypt_compat_ioctl(struct file *filp, unsigned int cmd,
1954 struct ap_perms *perms =
1955 (struct ap_perms *)filp->private_data;
1957 rc = zcrypt_check_ioctl(perms, cmd);
1961 if (cmd == ICARSAMODEXPO)
1962 return trans_modexpo32(perms, filp, cmd, arg);
1963 if (cmd == ICARSACRT)
1964 return trans_modexpo_crt32(perms, filp, cmd, arg);
1965 if (cmd == ZSECSENDCPRB)
1966 return trans_xcrb32(perms, filp, cmd, arg);
1967 return zcrypt_unlocked_ioctl(filp, cmd, arg);
1972 * Misc device file operations.
1974 static const struct file_operations zcrypt_fops = {
1975 .owner = THIS_MODULE,
1976 .read = zcrypt_read,
1977 .write = zcrypt_write,
1978 .unlocked_ioctl = zcrypt_unlocked_ioctl,
1979 #ifdef CONFIG_COMPAT
1980 .compat_ioctl = zcrypt_compat_ioctl,
1982 .open = zcrypt_open,
1983 .release = zcrypt_release,
1984 .llseek = no_llseek,
1990 static struct miscdevice zcrypt_misc_device = {
1991 .minor = MISC_DYNAMIC_MINOR,
1993 .fops = &zcrypt_fops,
1996 static int zcrypt_rng_device_count;
1997 static u32 *zcrypt_rng_buffer;
1998 static int zcrypt_rng_buffer_index;
1999 static DEFINE_MUTEX(zcrypt_rng_mutex);
2001 static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
2006 * We don't need locking here because the RNG API guarantees serialized
2007 * read method calls.
2009 if (zcrypt_rng_buffer_index == 0) {
2010 rc = zcrypt_rng((char *)zcrypt_rng_buffer);
2011 /* on failure: retry once again after a requested rescan */
2012 if ((rc == -ENODEV) && (zcrypt_process_rescan()))
2013 rc = zcrypt_rng((char *)zcrypt_rng_buffer);
2014 if (rc < 0)
2015 return -EIO;
2016 zcrypt_rng_buffer_index = rc / sizeof(*data);
2018 *data = zcrypt_rng_buffer[--zcrypt_rng_buffer_index];
2019 return sizeof(*data);
2022 static struct hwrng zcrypt_rng_dev = {
2024 .data_read = zcrypt_rng_data_read,
2028 int zcrypt_rng_device_add(void)
2032 mutex_lock(&zcrypt_rng_mutex);
2033 if (zcrypt_rng_device_count == 0) {
2034 zcrypt_rng_buffer = (u32 *)get_zeroed_page(GFP_KERNEL);
2035 if (!zcrypt_rng_buffer) {
2039 zcrypt_rng_buffer_index = 0;
2040 rc = hwrng_register(&zcrypt_rng_dev);
2043 zcrypt_rng_device_count = 1;
2045 zcrypt_rng_device_count++;
2047 mutex_unlock(&zcrypt_rng_mutex);
2051 free_page((unsigned long)zcrypt_rng_buffer);
2053 mutex_unlock(&zcrypt_rng_mutex);
2057 void zcrypt_rng_device_remove(void)
2059 mutex_lock(&zcrypt_rng_mutex);
2060 zcrypt_rng_device_count--;
2061 if (zcrypt_rng_device_count == 0) {
2062 hwrng_unregister(&zcrypt_rng_dev);
2063 free_page((unsigned long)zcrypt_rng_buffer);
2065 mutex_unlock(&zcrypt_rng_mutex);
2069 * Wait until the zcrypt api is operational.
2070 * The AP bus scan and the binding of ap devices to device drivers is
2071 * an asynchronous job. This function waits until these initial jobs
2072 * are done and so the zcrypt api should be ready to serve crypto
2073 * requests - if there are resources available. The function uses an
2074 * internal timeout of 60s. The very first caller will either wait for
2075 * ap bus bindings complete or the timeout happens. This state will be
2076 * remembered for further callers which will only be blocked until a
2077 * decision is made (timeout or bindings complete).
2078 * On timeout -ETIME is returned, on success the return value is 0.
2080 int zcrypt_wait_api_operational(void)
2082 static DEFINE_MUTEX(zcrypt_wait_api_lock);
2083 static int zcrypt_wait_api_state;
2086 rc = mutex_lock_interruptible(&zcrypt_wait_api_lock);
2090 switch (zcrypt_wait_api_state) {
2092 /* initial state, invoke wait for the ap bus complete */
2093 rc = ap_wait_init_apqn_bindings_complete(
2094 msecs_to_jiffies(60 * 1000));
2097 /* ap bus bindings are complete */
2098 zcrypt_wait_api_state = 1;
2101 /* interrupted, go back to caller */
2105 ZCRYPT_DBF_WARN("%s ap_wait_init_apqn_bindings_complete()=ETIME\n",
2107 zcrypt_wait_api_state = -ETIME;
2111 ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n",
2117 /* a previous caller already found ap bus bindings complete */
2121 /* a previous caller had timeout or other failure */
2122 rc = zcrypt_wait_api_state;
2126 mutex_unlock(&zcrypt_wait_api_lock);
2130 EXPORT_SYMBOL(zcrypt_wait_api_operational);
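/*
 * Usage sketch (illustrative): a client module waits for the initial
 * AP bus scan and bindings before sending its first request.
 *
 *	rc = zcrypt_wait_api_operational();
 *	if (rc) {
 *		pr_warn("zcrypt api not operational, rc=%d\n", rc);
 *		return rc;
 *	}
 *	... issue crypto requests ...
 */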
2132 int __init zcrypt_debug_init(void)
2134 zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
2135 DBF_MAX_SPRINTF_ARGS * sizeof(long));
2136 debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
2137 debug_set_level(zcrypt_dbf_info, DBF_ERR);
2142 void zcrypt_debug_exit(void)
2144 debug_unregister(zcrypt_dbf_info);
2147 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2149 static int __init zcdn_init(void)
2153 /* create a new class 'zcrypt' */
2154 zcrypt_class = class_create(ZCRYPT_NAME);
2155 if (IS_ERR(zcrypt_class)) {
2156 rc = PTR_ERR(zcrypt_class);
2157 goto out_class_create_failed;
2159 zcrypt_class->dev_release = zcdn_device_release;
2161 /* alloc device minor range */
2162 rc = alloc_chrdev_region(&zcrypt_devt,
2163 0, ZCRYPT_MAX_MINOR_NODES,
2166 goto out_alloc_chrdev_failed;
2168 cdev_init(&zcrypt_cdev, &zcrypt_fops);
2169 zcrypt_cdev.owner = THIS_MODULE;
2170 rc = cdev_add(&zcrypt_cdev, zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2172 goto out_cdev_add_failed;
2174 /* need some class specific sysfs attributes */
2175 rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
2177 goto out_class_create_file_1_failed;
2178 rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
2180 goto out_class_create_file_2_failed;
2184 out_class_create_file_2_failed:
2185 class_remove_file(zcrypt_class, &class_attr_zcdn_create);
2186 out_class_create_file_1_failed:
2187 cdev_del(&zcrypt_cdev);
2188 out_cdev_add_failed:
2189 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2190 out_alloc_chrdev_failed:
2191 class_destroy(zcrypt_class);
2192 out_class_create_failed:
2196 static void zcdn_exit(void)
2198 class_remove_file(zcrypt_class, &class_attr_zcdn_create);
2199 class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
2201 cdev_del(&zcrypt_cdev);
2202 unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
2203 class_destroy(zcrypt_class);
2209 * zcrypt_api_init(): Module initialization.
2211 * The module initialization code.
2213 int __init zcrypt_api_init(void)
2217 rc = zcrypt_debug_init();
2221 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2227 /* Register the request sprayer. */
2228 rc = misc_register(&zcrypt_misc_device);
2230 goto out_misc_register_failed;
2232 zcrypt_msgtype6_init();
2233 zcrypt_msgtype50_init();
2237 out_misc_register_failed:
2238 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2241 zcrypt_debug_exit();
2247 * zcrypt_api_exit(): Module termination.
2249 * The module termination code.
2251 void __exit zcrypt_api_exit(void)
2253 #ifdef CONFIG_ZCRYPT_MULTIDEVNODES
2256 misc_deregister(&zcrypt_misc_device);
2257 zcrypt_msgtype6_exit();
2258 zcrypt_msgtype50_exit();
2259 zcrypt_ccamisc_exit();
2260 zcrypt_ep11misc_exit();
2261 zcrypt_debug_exit();
2264 module_init(zcrypt_api_init);
2265 module_exit(zcrypt_api_exit);