1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright IBM Corp. 2016
6 * Adjunct processor bus, queue related code.
9 #define KMSG_COMPONENT "ap"
10 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12 #include <linux/init.h>
13 #include <linux/slab.h>
14 #include <asm/facility.h>
19 static void __ap_flush_queue(struct ap_queue *aq);
22 * some AP queue helper functions
25 static inline bool ap_q_supports_bind(struct ap_queue *aq)
27 return ap_test_bit(&aq->card->functions, AP_FUNC_EP11) ||
28 ap_test_bit(&aq->card->functions, AP_FUNC_ACCEL);
31 static inline bool ap_q_supports_assoc(struct ap_queue *aq)
33 return ap_test_bit(&aq->card->functions, AP_FUNC_EP11);
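/*
 * Note: secure execution bind is only offered on EP11 and accelerator
 * queues, association (with a secret index) only on EP11 queues -
 * this is what the two helper functions above encode.
 */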
37 * ap_queue_enable_irq(): Enable interrupt support on this AP queue.
39 * @ind: the notification indicator byte
41 * Enables interrupts on the AP queue via ap_aqic(). Based on the return
42 * value the caller waits a while and then checks with ap_test_queue()
43 * whether interrupts have actually been switched on.
45 static int ap_queue_enable_irq(struct ap_queue *aq, void *ind)
47 union ap_qirq_ctrl qirqctrl = { .value = 0 };
48 struct ap_queue_status status;
51 qirqctrl.isc = AP_ISC;
52 status = ap_aqic(aq->qid, qirqctrl, virt_to_phys(ind));
55 switch (status.response_code) {
56 case AP_RESPONSE_NORMAL:
57 case AP_RESPONSE_OTHERWISE_CHANGED:
59 case AP_RESPONSE_Q_NOT_AVAIL:
60 case AP_RESPONSE_DECONFIGURED:
61 case AP_RESPONSE_CHECKSTOPPED:
62 case AP_RESPONSE_INVALID_ADDRESS:
63 pr_err("Registering adapter interrupts for AP device %02x.%04x failed\n",
65 AP_QID_QUEUE(aq->qid));
67 case AP_RESPONSE_RESET_IN_PROGRESS:
68 case AP_RESPONSE_BUSY:
75 * __ap_send(): Send message to adjunct processor queue.
76 * @qid: The AP queue number
77 * @psmid: The program supplied message identifier
78 * @msg: The message text
79 * @msglen: The message length
80 * @special: Special Bit
82 * Returns AP queue status structure.
83 * Condition code 1 on NQAP can't happen because the L bit is 1.
84 * Condition code 2 on NQAP also means the send is incomplete,
85 * because a segment boundary was reached. The NQAP is repeated.
87 static inline struct ap_queue_status
88 __ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen,
93 return ap_nqap(qid, psmid, msg, msglen);
96 int ap_send(ap_qid_t qid, unsigned long psmid, void *msg, size_t msglen)
98 struct ap_queue_status status;
100 status = __ap_send(qid, psmid, msg, msglen, 0);
103 switch (status.response_code) {
104 case AP_RESPONSE_NORMAL:
106 case AP_RESPONSE_Q_FULL:
107 case AP_RESPONSE_RESET_IN_PROGRESS:
109 case AP_RESPONSE_REQ_FAC_NOT_INST:
111 default: /* Device is gone. */
115 EXPORT_SYMBOL(ap_send);
117 int ap_recv(ap_qid_t qid, unsigned long *psmid, void *msg, size_t msglen)
119 struct ap_queue_status status;
123 status = ap_dqap(qid, psmid, msg, msglen, NULL, NULL, NULL);
126 switch (status.response_code) {
127 case AP_RESPONSE_NORMAL:
129 case AP_RESPONSE_NO_PENDING_REPLY:
130 if (status.queue_empty)
133 case AP_RESPONSE_RESET_IN_PROGRESS:
139 EXPORT_SYMBOL(ap_recv);
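/*
 * Illustrative sketch only (not code from this file): a minimal
 * synchronous round trip with the exported ap_send()/ap_recv()
 * helpers. Buffer handling and the psmid value are assumptions of the
 * example; in-kernel users normally go through ap_queue_message() and
 * the state machine below instead.
 *
 *	unsigned long psmid = 0x0102030405060708UL;
 *	int rc;
 *
 *	rc = ap_send(qid, psmid, req, reqlen);
 *	if (!rc)
 *		rc = ap_recv(qid, &psmid, reply, replylen);
 */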
141 /* State machine definitions and helpers */
143 static enum ap_sm_wait ap_sm_nop(struct ap_queue *aq)
145 return AP_SM_WAIT_NONE;
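/*
 * The ap_sm_* handlers below return an enum ap_sm_wait hint for the
 * caller: AP_SM_WAIT_AGAIN asks to run the state machine again right
 * away (see ap_sm_event_loop()), the *_TIMEOUT values request a timer
 * based re-poll, AP_SM_WAIT_INTERRUPT means wait for the reply
 * interrupt, and AP_SM_WAIT_NONE means there is nothing to wait for
 * (summary of how ap_sm_event_loop() below and ap_wait() in ap_bus.c
 * consume these hints).
 */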
149 * ap_sm_recv(): Receive pending reply messages from an AP queue but do
150 * not change the state of the device.
151 * @aq: pointer to the AP queue
153 * Returns the ap_queue_status returned by the last DQAP invocation.
155 static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
157 struct ap_queue_status status;
158 struct ap_message *ap_msg;
161 unsigned long resgr0 = 0;
165 * DQAP loop until the response code and resgr0 indicate that
166 * the msg has been received completely. As the very same buffer
167 * is used for each invocation, the msg is overwritten each time.
168 * That's intended; in such a case the receiver of the msg is
169 * informed with a msg rc code of EMSGSIZE.
172 status = ap_dqap(aq->qid, &aq->reply->psmid,
173 aq->reply->msg, aq->reply->bufsize,
174 &aq->reply->len, &reslen, &resgr0);
176 } while (status.response_code == 0xFF && resgr0 != 0);
178 switch (status.response_code) {
179 case AP_RESPONSE_NORMAL:
180 aq->queue_count = max_t(int, 0, aq->queue_count - 1);
181 if (!status.queue_empty && !aq->queue_count)
183 if (aq->queue_count > 0)
184 mod_timer(&aq->timeout,
185 jiffies + aq->request_timeout);
186 list_for_each_entry(ap_msg, &aq->pendingq, list) {
187 if (ap_msg->psmid != aq->reply->psmid)
189 list_del_init(&ap_msg->list);
190 aq->pendingq_count--;
192 ap_msg->rc = -EMSGSIZE;
193 ap_msg->receive(aq, ap_msg, NULL);
195 ap_msg->receive(aq, ap_msg, aq->reply);
201 AP_DBF_WARN("%s unassociated reply psmid=0x%016lx on 0x%02x.%04x\n",
202 __func__, aq->reply->psmid,
203 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
206 case AP_RESPONSE_NO_PENDING_REPLY:
207 if (!status.queue_empty || aq->queue_count <= 0)
209 /* The card shouldn't forget requests but who knows. */
211 list_splice_init(&aq->pendingq, &aq->requestq);
212 aq->requestq_count += aq->pendingq_count;
213 aq->pendingq_count = 0;
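/*
 * Note: a reply is matched against the pendingq entries by psmid; the
 * matching request is removed from pendingq and its ->receive()
 * callback invoked (with rc = -EMSGSIZE if the reply did not fit into
 * the reply buffer). A reply whose psmid matches no pending request
 * is only logged as unassociated and dropped.
 */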
222 * ap_sm_read(): Receive pending reply messages from an AP queue.
223 * @aq: pointer to the AP queue
225 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT or AP_SM_WAIT_HIGH_TIMEOUT
227 static enum ap_sm_wait ap_sm_read(struct ap_queue *aq)
229 struct ap_queue_status status;
232 return AP_SM_WAIT_NONE;
233 status = ap_sm_recv(aq);
235 return AP_SM_WAIT_NONE;
236 switch (status.response_code) {
237 case AP_RESPONSE_NORMAL:
238 if (aq->queue_count > 0) {
239 aq->sm_state = AP_SM_STATE_WORKING;
240 return AP_SM_WAIT_AGAIN;
242 aq->sm_state = AP_SM_STATE_IDLE;
243 return AP_SM_WAIT_NONE;
244 case AP_RESPONSE_NO_PENDING_REPLY:
245 if (aq->queue_count > 0)
246 return aq->interrupt ?
247 AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
248 aq->sm_state = AP_SM_STATE_IDLE;
249 return AP_SM_WAIT_NONE;
251 aq->dev_state = AP_DEV_STATE_ERROR;
252 aq->last_err_rc = status.response_code;
253 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
254 __func__, status.response_code,
255 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
256 return AP_SM_WAIT_NONE;
261 * ap_sm_write(): Send messages from the request queue to an AP queue.
262 * @aq: pointer to the AP queue
264 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, AP_SM_WAIT_INTERRUPT, AP_SM_WAIT_HIGH_TIMEOUT or AP_SM_WAIT_LOW_TIMEOUT
266 static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
268 struct ap_queue_status status;
269 struct ap_message *ap_msg;
270 ap_qid_t qid = aq->qid;
272 if (aq->requestq_count <= 0)
273 return AP_SM_WAIT_NONE;
275 /* Start the next request on the queue. */
276 ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
277 #ifdef CONFIG_ZCRYPT_DEBUG
278 if (ap_msg->fi.action == AP_FI_ACTION_NQAP_QID_INVAL) {
279 AP_DBF_WARN("%s fi cmd 0x%04x: forcing invalid qid 0xFF00\n",
280 __func__, ap_msg->fi.cmd);
284 status = __ap_send(qid, ap_msg->psmid,
285 ap_msg->msg, ap_msg->len,
286 ap_msg->flags & AP_MSG_FLAG_SPECIAL);
288 return AP_SM_WAIT_NONE;
289 switch (status.response_code) {
290 case AP_RESPONSE_NORMAL:
291 aq->queue_count = max_t(int, 1, aq->queue_count + 1);
292 if (aq->queue_count == 1)
293 mod_timer(&aq->timeout, jiffies + aq->request_timeout);
294 list_move_tail(&ap_msg->list, &aq->pendingq);
295 aq->requestq_count--;
296 aq->pendingq_count++;
297 if (aq->queue_count < aq->card->queue_depth) {
298 aq->sm_state = AP_SM_STATE_WORKING;
299 return AP_SM_WAIT_AGAIN;
302 case AP_RESPONSE_Q_FULL:
303 aq->sm_state = AP_SM_STATE_QUEUE_FULL;
304 return aq->interrupt ?
305 AP_SM_WAIT_INTERRUPT : AP_SM_WAIT_HIGH_TIMEOUT;
306 case AP_RESPONSE_RESET_IN_PROGRESS:
307 aq->sm_state = AP_SM_STATE_RESET_WAIT;
308 return AP_SM_WAIT_LOW_TIMEOUT;
309 case AP_RESPONSE_INVALID_DOMAIN:
310 AP_DBF_WARN("%s RESPONSE_INVALID_DOMAIN on NQAP\n", __func__);
312 case AP_RESPONSE_MESSAGE_TOO_BIG:
313 case AP_RESPONSE_REQ_FAC_NOT_INST:
314 list_del_init(&ap_msg->list);
315 aq->requestq_count--;
316 ap_msg->rc = -EINVAL;
317 ap_msg->receive(aq, ap_msg, NULL);
318 return AP_SM_WAIT_AGAIN;
320 aq->dev_state = AP_DEV_STATE_ERROR;
321 aq->last_err_rc = status.response_code;
322 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
323 __func__, status.response_code,
324 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
325 return AP_SM_WAIT_NONE;
330 * ap_sm_read_write(): Send and receive messages to/from an AP queue.
331 * @aq: pointer to the AP queue
333 * Returns AP_SM_WAIT_NONE, AP_SM_WAIT_AGAIN, or AP_SM_WAIT_INTERRUPT
335 static enum ap_sm_wait ap_sm_read_write(struct ap_queue *aq)
337 return min(ap_sm_read(aq), ap_sm_write(aq));
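/*
 * Note: combining the two hints with min() relies on the ordering of
 * enum ap_sm_wait in ap_bus.h: more urgent wait modes have smaller
 * values (AP_SM_WAIT_AGAIN is the smallest, AP_SM_WAIT_NONE the
 * largest), so min() yields the more urgent of the read and write
 * wait requests.
 */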
341 * ap_sm_reset(): Reset an AP queue.
344 * Submit the Reset command to an AP queue.
346 static enum ap_sm_wait ap_sm_reset(struct ap_queue *aq)
348 struct ap_queue_status status;
350 status = ap_rapq(aq->qid, aq->rapq_fbit);
352 return AP_SM_WAIT_NONE;
353 switch (status.response_code) {
354 case AP_RESPONSE_NORMAL:
355 case AP_RESPONSE_RESET_IN_PROGRESS:
356 aq->sm_state = AP_SM_STATE_RESET_WAIT;
357 aq->interrupt = false;
359 return AP_SM_WAIT_LOW_TIMEOUT;
361 aq->dev_state = AP_DEV_STATE_ERROR;
362 aq->last_err_rc = status.response_code;
363 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
364 __func__, status.response_code,
365 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
366 return AP_SM_WAIT_NONE;
371 * ap_sm_reset_wait(): Test queue for completion of the reset operation
372 * @aq: pointer to the AP queue
374 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
376 static enum ap_sm_wait ap_sm_reset_wait(struct ap_queue *aq)
378 struct ap_queue_status status;
381 if (aq->queue_count > 0 && aq->reply)
382 /* Try to read a completed message and get the status */
383 status = ap_sm_recv(aq);
385 /* Get the status with TAPQ */
386 status = ap_tapq(aq->qid, NULL);
388 switch (status.response_code) {
389 case AP_RESPONSE_NORMAL:
390 lsi_ptr = ap_airq_ptr();
391 if (lsi_ptr && ap_queue_enable_irq(aq, lsi_ptr) == 0)
392 aq->sm_state = AP_SM_STATE_SETIRQ_WAIT;
394 aq->sm_state = (aq->queue_count > 0) ?
395 AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
396 return AP_SM_WAIT_AGAIN;
397 case AP_RESPONSE_BUSY:
398 case AP_RESPONSE_RESET_IN_PROGRESS:
399 return AP_SM_WAIT_LOW_TIMEOUT;
400 case AP_RESPONSE_Q_NOT_AVAIL:
401 case AP_RESPONSE_DECONFIGURED:
402 case AP_RESPONSE_CHECKSTOPPED:
404 aq->dev_state = AP_DEV_STATE_ERROR;
405 aq->last_err_rc = status.response_code;
406 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
407 __func__, status.response_code,
408 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
409 return AP_SM_WAIT_NONE;
414 * ap_sm_setirq_wait(): Test queue for completion of the irq enablement
415 * @aq: pointer to the AP queue
417 * Returns AP_SM_WAIT_AGAIN, AP_SM_WAIT_LOW_TIMEOUT or AP_SM_WAIT_NONE.
419 static enum ap_sm_wait ap_sm_setirq_wait(struct ap_queue *aq)
421 struct ap_queue_status status;
423 if (aq->queue_count > 0 && aq->reply)
424 /* Try to read a completed message and get the status */
425 status = ap_sm_recv(aq);
427 /* Get the status with TAPQ */
428 status = ap_tapq(aq->qid, NULL);
430 if (status.irq_enabled == 1) {
431 /* Irqs are now enabled */
432 aq->interrupt = true;
433 aq->sm_state = (aq->queue_count > 0) ?
434 AP_SM_STATE_WORKING : AP_SM_STATE_IDLE;
437 switch (status.response_code) {
438 case AP_RESPONSE_NORMAL:
439 if (aq->queue_count > 0)
440 return AP_SM_WAIT_AGAIN;
442 case AP_RESPONSE_NO_PENDING_REPLY:
443 return AP_SM_WAIT_LOW_TIMEOUT;
445 aq->dev_state = AP_DEV_STATE_ERROR;
446 aq->last_err_rc = status.response_code;
447 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
448 __func__, status.response_code,
449 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
450 return AP_SM_WAIT_NONE;
455 * ap_sm_assoc_wait(): Test queue for completion of a pending
456 * association request.
457 * @aq: pointer to the AP queue
459 static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
461 struct ap_queue_status status;
462 struct ap_tapq_gr2 info;
464 status = ap_test_queue(aq->qid, 1, &info);
465 /* handle asynchronous error on this queue */
466 if (status.async && status.response_code) {
467 aq->dev_state = AP_DEV_STATE_ERROR;
468 aq->last_err_rc = status.response_code;
469 AP_DBF_WARN("%s asynch RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
470 __func__, status.response_code,
471 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
472 return AP_SM_WAIT_NONE;
474 if (status.response_code > AP_RESPONSE_BUSY) {
475 aq->dev_state = AP_DEV_STATE_ERROR;
476 aq->last_err_rc = status.response_code;
477 AP_DBF_WARN("%s RC 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
478 __func__, status.response_code,
479 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
480 return AP_SM_WAIT_NONE;
486 /* association is complete */
487 aq->sm_state = AP_SM_STATE_IDLE;
488 AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
489 __func__, AP_QID_CARD(aq->qid),
490 AP_QID_QUEUE(aq->qid), aq->assoc_idx);
491 return AP_SM_WAIT_NONE;
492 case AP_BS_Q_USABLE_NO_SECURE_KEY:
493 /* association still pending */
494 return AP_SM_WAIT_LOW_TIMEOUT;
496 /* reset from 'outside' happened or unknown error */
497 aq->assoc_idx = ASSOC_IDX_INVALID;
498 aq->dev_state = AP_DEV_STATE_ERROR;
499 aq->last_err_rc = status.response_code;
500 AP_DBF_WARN("%s bs 0x%02x on 0x%02x.%04x -> AP_DEV_STATE_ERROR\n",
502 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
503 return AP_SM_WAIT_NONE;
508 * AP state machine jump table
510 static ap_func_t *ap_jumptable[NR_AP_SM_STATES][NR_AP_SM_EVENTS] = {
511 [AP_SM_STATE_RESET_START] = {
512 [AP_SM_EVENT_POLL] = ap_sm_reset,
513 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
515 [AP_SM_STATE_RESET_WAIT] = {
516 [AP_SM_EVENT_POLL] = ap_sm_reset_wait,
517 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
519 [AP_SM_STATE_SETIRQ_WAIT] = {
520 [AP_SM_EVENT_POLL] = ap_sm_setirq_wait,
521 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
523 [AP_SM_STATE_IDLE] = {
524 [AP_SM_EVENT_POLL] = ap_sm_write,
525 [AP_SM_EVENT_TIMEOUT] = ap_sm_nop,
527 [AP_SM_STATE_WORKING] = {
528 [AP_SM_EVENT_POLL] = ap_sm_read_write,
529 [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
531 [AP_SM_STATE_QUEUE_FULL] = {
532 [AP_SM_EVENT_POLL] = ap_sm_read,
533 [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
535 [AP_SM_STATE_ASSOC_WAIT] = {
536 [AP_SM_EVENT_POLL] = ap_sm_assoc_wait,
537 [AP_SM_EVENT_TIMEOUT] = ap_sm_reset,
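/*
 * Example of how the table is used by ap_sm_event() below: an
 * AP_SM_EVENT_POLL event on a queue in AP_SM_STATE_IDLE dispatches to
 * ap_sm_write(), while the same event in AP_SM_STATE_WORKING runs
 * ap_sm_read_write().
 */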
541 enum ap_sm_wait ap_sm_event(struct ap_queue *aq, enum ap_sm_event event)
543 if (aq->config && !aq->chkstop &&
544 aq->dev_state > AP_DEV_STATE_UNINITIATED)
545 return ap_jumptable[aq->sm_state][event](aq);
547 return AP_SM_WAIT_NONE;
550 enum ap_sm_wait ap_sm_event_loop(struct ap_queue *aq, enum ap_sm_event event)
552 enum ap_sm_wait wait;
554 while ((wait = ap_sm_event(aq, event)) == AP_SM_WAIT_AGAIN)
560 * AP queue related attributes.
562 static ssize_t request_count_show(struct device *dev,
563 struct device_attribute *attr,
566 struct ap_queue *aq = to_ap_queue(dev);
570 spin_lock_bh(&aq->lock);
571 if (aq->dev_state > AP_DEV_STATE_UNINITIATED) {
572 req_cnt = aq->total_request_count;
575 spin_unlock_bh(&aq->lock);
578 return sysfs_emit(buf, "%llu\n", req_cnt);
580 return sysfs_emit(buf, "-\n");
583 static ssize_t request_count_store(struct device *dev,
584 struct device_attribute *attr,
585 const char *buf, size_t count)
587 struct ap_queue *aq = to_ap_queue(dev);
589 spin_lock_bh(&aq->lock);
590 aq->total_request_count = 0;
591 spin_unlock_bh(&aq->lock);
596 static DEVICE_ATTR_RW(request_count);
598 static ssize_t requestq_count_show(struct device *dev,
599 struct device_attribute *attr, char *buf)
601 struct ap_queue *aq = to_ap_queue(dev);
602 unsigned int reqq_cnt = 0;
604 spin_lock_bh(&aq->lock);
605 if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
606 reqq_cnt = aq->requestq_count;
607 spin_unlock_bh(&aq->lock);
608 return sysfs_emit(buf, "%d\n", reqq_cnt);
611 static DEVICE_ATTR_RO(requestq_count);
613 static ssize_t pendingq_count_show(struct device *dev,
614 struct device_attribute *attr, char *buf)
616 struct ap_queue *aq = to_ap_queue(dev);
617 unsigned int penq_cnt = 0;
619 spin_lock_bh(&aq->lock);
620 if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
621 penq_cnt = aq->pendingq_count;
622 spin_unlock_bh(&aq->lock);
623 return sysfs_emit(buf, "%d\n", penq_cnt);
626 static DEVICE_ATTR_RO(pendingq_count);
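/*
 * Note: requestq_count counts messages that are queued but not yet
 * sent to the AP queue, pendingq_count counts messages that have been
 * sent via NQAP and are waiting for their reply (see ap_sm_write()).
 */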
628 static ssize_t reset_show(struct device *dev,
629 struct device_attribute *attr, char *buf)
631 struct ap_queue *aq = to_ap_queue(dev);
634 spin_lock_bh(&aq->lock);
635 switch (aq->sm_state) {
636 case AP_SM_STATE_RESET_START:
637 case AP_SM_STATE_RESET_WAIT:
638 rc = sysfs_emit(buf, "Reset in progress.\n");
640 case AP_SM_STATE_WORKING:
641 case AP_SM_STATE_QUEUE_FULL:
642 rc = sysfs_emit(buf, "Reset Timer armed.\n");
645 rc = sysfs_emit(buf, "No Reset Timer set.\n");
647 spin_unlock_bh(&aq->lock);
651 static ssize_t reset_store(struct device *dev,
652 struct device_attribute *attr,
653 const char *buf, size_t count)
655 struct ap_queue *aq = to_ap_queue(dev);
657 spin_lock_bh(&aq->lock);
658 __ap_flush_queue(aq);
659 aq->sm_state = AP_SM_STATE_RESET_START;
660 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
661 spin_unlock_bh(&aq->lock);
663 AP_DBF_INFO("%s reset queue=%02x.%04x triggered by user\n",
664 __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
669 static DEVICE_ATTR_RW(reset);
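/*
 * Illustrative usage from userspace (sysfs path layout assumed, e.g.
 * /sys/bus/ap/devices/<xx>.<yyyy>/): writing any value to the 'reset'
 * attribute flushes the queue and triggers a fresh reset:
 *
 *	echo 1 > /sys/bus/ap/devices/04.0005/reset
 */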
671 static ssize_t interrupt_show(struct device *dev,
672 struct device_attribute *attr, char *buf)
674 struct ap_queue *aq = to_ap_queue(dev);
677 spin_lock_bh(&aq->lock);
678 if (aq->sm_state == AP_SM_STATE_SETIRQ_WAIT)
679 rc = sysfs_emit(buf, "Enable Interrupt pending.\n");
680 else if (aq->interrupt)
681 rc = sysfs_emit(buf, "Interrupts enabled.\n");
683 rc = sysfs_emit(buf, "Interrupts disabled.\n");
684 spin_unlock_bh(&aq->lock);
688 static DEVICE_ATTR_RO(interrupt);
690 static ssize_t config_show(struct device *dev,
691 struct device_attribute *attr, char *buf)
693 struct ap_queue *aq = to_ap_queue(dev);
696 spin_lock_bh(&aq->lock);
697 rc = sysfs_emit(buf, "%d\n", aq->config ? 1 : 0);
698 spin_unlock_bh(&aq->lock);
702 static DEVICE_ATTR_RO(config);
704 static ssize_t chkstop_show(struct device *dev,
705 struct device_attribute *attr, char *buf)
707 struct ap_queue *aq = to_ap_queue(dev);
710 spin_lock_bh(&aq->lock);
711 rc = sysfs_emit(buf, "%d\n", aq->chkstop ? 1 : 0);
712 spin_unlock_bh(&aq->lock);
716 static DEVICE_ATTR_RO(chkstop);
718 static ssize_t ap_functions_show(struct device *dev,
719 struct device_attribute *attr, char *buf)
721 struct ap_queue *aq = to_ap_queue(dev);
722 struct ap_queue_status status;
723 struct ap_tapq_gr2 info;
725 status = ap_test_queue(aq->qid, 1, &info);
726 if (status.response_code > AP_RESPONSE_BUSY) {
727 AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
728 __func__, status.response_code,
729 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
733 return sysfs_emit(buf, "0x%08X\n", info.fac);
736 static DEVICE_ATTR_RO(ap_functions);
738 #ifdef CONFIG_ZCRYPT_DEBUG
739 static ssize_t states_show(struct device *dev,
740 struct device_attribute *attr, char *buf)
742 struct ap_queue *aq = to_ap_queue(dev);
745 spin_lock_bh(&aq->lock);
746 /* queue device state */
747 switch (aq->dev_state) {
748 case AP_DEV_STATE_UNINITIATED:
749 rc = sysfs_emit(buf, "UNINITIATED\n");
751 case AP_DEV_STATE_OPERATING:
752 rc = sysfs_emit(buf, "OPERATING");
754 case AP_DEV_STATE_SHUTDOWN:
755 rc = sysfs_emit(buf, "SHUTDOWN");
757 case AP_DEV_STATE_ERROR:
758 rc = sysfs_emit(buf, "ERROR");
761 rc = sysfs_emit(buf, "UNKNOWN");
763 /* state machine state */
765 switch (aq->sm_state) {
766 case AP_SM_STATE_RESET_START:
767 rc += sysfs_emit_at(buf, rc, " [RESET_START]\n");
769 case AP_SM_STATE_RESET_WAIT:
770 rc += sysfs_emit_at(buf, rc, " [RESET_WAIT]\n");
772 case AP_SM_STATE_SETIRQ_WAIT:
773 rc += sysfs_emit_at(buf, rc, " [SETIRQ_WAIT]\n");
775 case AP_SM_STATE_IDLE:
776 rc += sysfs_emit_at(buf, rc, " [IDLE]\n");
778 case AP_SM_STATE_WORKING:
779 rc += sysfs_emit_at(buf, rc, " [WORKING]\n");
781 case AP_SM_STATE_QUEUE_FULL:
782 rc += sysfs_emit_at(buf, rc, " [FULL]\n");
784 case AP_SM_STATE_ASSOC_WAIT:
785 rc += sysfs_emit_at(buf, rc, " [ASSOC_WAIT]\n");
788 rc += sysfs_emit_at(buf, rc, " [UNKNOWN]\n");
791 spin_unlock_bh(&aq->lock);
795 static DEVICE_ATTR_RO(states);
797 static ssize_t last_err_rc_show(struct device *dev,
798 struct device_attribute *attr, char *buf)
800 struct ap_queue *aq = to_ap_queue(dev);
803 spin_lock_bh(&aq->lock);
804 rc = aq->last_err_rc;
805 spin_unlock_bh(&aq->lock);
808 case AP_RESPONSE_NORMAL:
809 return sysfs_emit(buf, "NORMAL\n");
810 case AP_RESPONSE_Q_NOT_AVAIL:
811 return sysfs_emit(buf, "Q_NOT_AVAIL\n");
812 case AP_RESPONSE_RESET_IN_PROGRESS:
813 return sysfs_emit(buf, "RESET_IN_PROGRESS\n");
814 case AP_RESPONSE_DECONFIGURED:
815 return sysfs_emit(buf, "DECONFIGURED\n");
816 case AP_RESPONSE_CHECKSTOPPED:
817 return sysfs_emit(buf, "CHECKSTOPPED\n");
818 case AP_RESPONSE_BUSY:
819 return sysfs_emit(buf, "BUSY\n");
820 case AP_RESPONSE_INVALID_ADDRESS:
821 return sysfs_emit(buf, "INVALID_ADDRESS\n");
822 case AP_RESPONSE_OTHERWISE_CHANGED:
823 return sysfs_emit(buf, "OTHERWISE_CHANGED\n");
824 case AP_RESPONSE_Q_FULL:
825 return sysfs_emit(buf, "Q_FULL/NO_PENDING_REPLY\n");
826 case AP_RESPONSE_INDEX_TOO_BIG:
827 return sysfs_emit(buf, "INDEX_TOO_BIG\n");
828 case AP_RESPONSE_NO_FIRST_PART:
829 return sysfs_emit(buf, "NO_FIRST_PART\n");
830 case AP_RESPONSE_MESSAGE_TOO_BIG:
831 return sysfs_emit(buf, "MESSAGE_TOO_BIG\n");
832 case AP_RESPONSE_REQ_FAC_NOT_INST:
833 return sysfs_emit(buf, "REQ_FAC_NOT_INST\n");
835 return sysfs_emit(buf, "response code %d\n", rc);
838 static DEVICE_ATTR_RO(last_err_rc);
841 static struct attribute *ap_queue_dev_attrs[] = {
842 &dev_attr_request_count.attr,
843 &dev_attr_requestq_count.attr,
844 &dev_attr_pendingq_count.attr,
845 &dev_attr_reset.attr,
846 &dev_attr_interrupt.attr,
847 &dev_attr_config.attr,
848 &dev_attr_chkstop.attr,
849 &dev_attr_ap_functions.attr,
850 #ifdef CONFIG_ZCRYPT_DEBUG
851 &dev_attr_states.attr,
852 &dev_attr_last_err_rc.attr,
857 static struct attribute_group ap_queue_dev_attr_group = {
858 .attrs = ap_queue_dev_attrs
861 static const struct attribute_group *ap_queue_dev_attr_groups[] = {
862 &ap_queue_dev_attr_group,
866 static struct device_type ap_queue_type = {
868 .groups = ap_queue_dev_attr_groups,
871 static ssize_t se_bind_show(struct device *dev,
872 struct device_attribute *attr, char *buf)
874 struct ap_queue *aq = to_ap_queue(dev);
875 struct ap_queue_status status;
876 struct ap_tapq_gr2 info;
878 if (!ap_q_supports_bind(aq))
879 return sysfs_emit(buf, "-\n");
881 status = ap_test_queue(aq->qid, 1, &info);
882 if (status.response_code > AP_RESPONSE_BUSY) {
883 AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
884 __func__, status.response_code,
885 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
890 case AP_BS_Q_USABLE_NO_SECURE_KEY:
891 return sysfs_emit(buf, "bound\n");
893 return sysfs_emit(buf, "unbound\n");
897 static ssize_t se_bind_store(struct device *dev,
898 struct device_attribute *attr,
899 const char *buf, size_t count)
901 struct ap_queue *aq = to_ap_queue(dev);
902 struct ap_queue_status status;
906 if (!ap_q_supports_bind(aq))
909 /* only 0 (unbind) and 1 (bind) allowed */
910 rc = kstrtobool(buf, &value);
916 spin_lock_bh(&aq->lock);
917 if (aq->sm_state < AP_SM_STATE_IDLE) {
918 spin_unlock_bh(&aq->lock);
921 status = ap_bapq(aq->qid);
922 spin_unlock_bh(&aq->lock);
923 if (status.response_code) {
924 AP_DBF_WARN("%s RC 0x%02x on bapq(0x%02x.%04x)\n",
925 __func__, status.response_code,
926 AP_QID_CARD(aq->qid),
927 AP_QID_QUEUE(aq->qid));
931 /* unbind, set F bit arg and trigger RAPQ */
932 spin_lock_bh(&aq->lock);
933 __ap_flush_queue(aq);
935 aq->assoc_idx = ASSOC_IDX_INVALID;
936 aq->sm_state = AP_SM_STATE_RESET_START;
937 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
938 spin_unlock_bh(&aq->lock);
944 static DEVICE_ATTR_RW(se_bind);
946 static ssize_t se_associate_show(struct device *dev,
947 struct device_attribute *attr, char *buf)
949 struct ap_queue *aq = to_ap_queue(dev);
950 struct ap_queue_status status;
951 struct ap_tapq_gr2 info;
953 if (!ap_q_supports_assoc(aq))
954 return sysfs_emit(buf, "-\n");
956 status = ap_test_queue(aq->qid, 1, &info);
957 if (status.response_code > AP_RESPONSE_BUSY) {
958 AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
959 __func__, status.response_code,
960 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
966 if (aq->assoc_idx == ASSOC_IDX_INVALID) {
967 AP_DBF_WARN("%s AP_BS_Q_USABLE but invalid assoc_idx\n", __func__);
970 return sysfs_emit(buf, "associated %u\n", aq->assoc_idx);
971 case AP_BS_Q_USABLE_NO_SECURE_KEY:
972 if (aq->assoc_idx != ASSOC_IDX_INVALID)
973 return sysfs_emit(buf, "association pending\n");
976 return sysfs_emit(buf, "unassociated\n");
980 static ssize_t se_associate_store(struct device *dev,
981 struct device_attribute *attr,
982 const char *buf, size_t count)
984 struct ap_queue *aq = to_ap_queue(dev);
985 struct ap_queue_status status;
989 if (!ap_q_supports_assoc(aq))
992 /* association index needs to be >= 0 */
993 rc = kstrtouint(buf, 0, &value);
996 if (value >= ASSOC_IDX_INVALID)
999 spin_lock_bh(&aq->lock);
1001 /* sm should be in idle state */
1002 if (aq->sm_state != AP_SM_STATE_IDLE) {
1003 spin_unlock_bh(&aq->lock);
1007 /* already associated or association pending ? */
1008 if (aq->assoc_idx != ASSOC_IDX_INVALID) {
1009 spin_unlock_bh(&aq->lock);
1013 /* trigger the asynchronous association request */
1014 status = ap_aapq(aq->qid, value);
1015 switch (status.response_code) {
1016 case AP_RESPONSE_NORMAL:
1017 case AP_RESPONSE_STATE_CHANGE_IN_PROGRESS:
1018 aq->sm_state = AP_SM_STATE_ASSOC_WAIT;
1019 aq->assoc_idx = value;
1020 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
1021 spin_unlock_bh(&aq->lock);
1024 spin_unlock_bh(&aq->lock);
1025 AP_DBF_WARN("%s RC 0x%02x on aapq(0x%02x.%04x)\n",
1026 __func__, status.response_code,
1027 AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
1034 static DEVICE_ATTR_RW(se_associate);
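/*
 * Illustrative usage from userspace (sysfs path layout assumed): in a
 * secure execution guest a queue is first bound and then associated
 * with an association index, e.g.:
 *
 *	echo 1 > /sys/bus/ap/devices/04.0005/se_bind
 *	echo 1 > /sys/bus/ap/devices/04.0005/se_associate
 *
 * The association completes asynchronously; progress can be watched
 * via the se_associate attribute ("association pending" ->
 * "associated <idx>").
 */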
1036 static struct attribute *ap_queue_dev_sb_attrs[] = {
1037 &dev_attr_se_bind.attr,
1038 &dev_attr_se_associate.attr,
1042 static struct attribute_group ap_queue_dev_sb_attr_group = {
1043 .attrs = ap_queue_dev_sb_attrs
1046 static const struct attribute_group *ap_queue_dev_sb_attr_groups[] = {
1047 &ap_queue_dev_sb_attr_group,
1051 static void ap_queue_device_release(struct device *dev)
1053 struct ap_queue *aq = to_ap_queue(dev);
1055 spin_lock_bh(&ap_queues_lock);
1056 hash_del(&aq->hnode);
1057 spin_unlock_bh(&ap_queues_lock);
1062 struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type)
1064 struct ap_queue *aq;
1066 aq = kzalloc(sizeof(*aq), GFP_KERNEL);
1069 aq->ap_dev.device.release = ap_queue_device_release;
1070 aq->ap_dev.device.type = &ap_queue_type;
1071 aq->ap_dev.device_type = device_type;
1072 /* add optional SE secure binding attributes group */
1073 if (ap_sb_available() && is_prot_virt_guest())
1074 aq->ap_dev.device.groups = ap_queue_dev_sb_attr_groups;
1076 aq->interrupt = false;
1077 spin_lock_init(&aq->lock);
1078 INIT_LIST_HEAD(&aq->pendingq);
1079 INIT_LIST_HEAD(&aq->requestq);
1080 timer_setup(&aq->timeout, ap_request_timeout, 0);
1085 void ap_queue_init_reply(struct ap_queue *aq, struct ap_message *reply)
1089 spin_lock_bh(&aq->lock);
1090 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
1091 spin_unlock_bh(&aq->lock);
1093 EXPORT_SYMBOL(ap_queue_init_reply);
1096 * ap_queue_message(): Queue a request to an AP device.
1097 * @aq: The AP device to queue the message to
1098 * @ap_msg: The message that is to be added
1100 int ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
1104 /* msg needs to have a valid receive-callback */
1105 BUG_ON(!ap_msg->receive);
1107 spin_lock_bh(&aq->lock);
1109 /* only allow queueing new messages if the device state is ok */
1110 if (aq->dev_state == AP_DEV_STATE_OPERATING) {
1111 list_add_tail(&ap_msg->list, &aq->requestq);
1112 aq->requestq_count++;
1113 aq->total_request_count++;
1114 atomic64_inc(&aq->card->total_request_count);
1119 /* Send/receive as many requests from the queue as possible. */
1120 ap_wait(ap_sm_event_loop(aq, AP_SM_EVENT_POLL));
1122 spin_unlock_bh(&aq->lock);
1126 EXPORT_SYMBOL(ap_queue_message);
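/*
 * Illustrative sketch only (helper names other than ap_queue_message()
 * are assumptions, see ap_bus.h and the zcrypt drivers): a caller
 * prepares a struct ap_message with a unique psmid, the message buffer
 * and a mandatory ->receive() callback before queueing it:
 *
 *	struct ap_message ap_msg;
 *
 *	ap_init_message(&ap_msg);
 *	ap_msg.psmid = my_unique_psmid;
 *	ap_msg.msg = buf;
 *	ap_msg.len = buflen;
 *	ap_msg.receive = my_receive_cb;
 *	rc = ap_queue_message(aq, &ap_msg);
 *
 * The reply is later delivered through ->receive() from ap_sm_recv().
 */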
1129 * ap_cancel_message(): Cancel a crypto request.
1130 * @aq: The AP device that has the message queued
1131 * @ap_msg: The message that is to be removed
1133 * Cancel a crypto request. This is done by removing the request
1134 * from the device pending or request queue. Note that the
1135 * request stays on the AP queue. When it finishes the message
1136 * reply will be discarded because the psmid can't be found.
1138 void ap_cancel_message(struct ap_queue *aq, struct ap_message *ap_msg)
1140 struct ap_message *tmp;
1142 spin_lock_bh(&aq->lock);
1143 if (!list_empty(&ap_msg->list)) {
1144 list_for_each_entry(tmp, &aq->pendingq, list)
1145 if (tmp->psmid == ap_msg->psmid) {
1146 aq->pendingq_count--;
1149 aq->requestq_count--;
1151 list_del_init(&ap_msg->list);
1153 spin_unlock_bh(&aq->lock);
1155 EXPORT_SYMBOL(ap_cancel_message);
1158 * __ap_flush_queue(): Flush requests.
1159 * @aq: Pointer to the AP queue
1161 * Flush all requests from the request/pending queue of an AP device.
1163 static void __ap_flush_queue(struct ap_queue *aq)
1165 struct ap_message *ap_msg, *next;
1167 list_for_each_entry_safe(ap_msg, next, &aq->pendingq, list) {
1168 list_del_init(&ap_msg->list);
1169 aq->pendingq_count--;
1170 ap_msg->rc = -EAGAIN;
1171 ap_msg->receive(aq, ap_msg, NULL);
1173 list_for_each_entry_safe(ap_msg, next, &aq->requestq, list) {
1174 list_del_init(&ap_msg->list);
1175 aq->requestq_count--;
1176 ap_msg->rc = -EAGAIN;
1177 ap_msg->receive(aq, ap_msg, NULL);
1179 aq->queue_count = 0;
1182 void ap_flush_queue(struct ap_queue *aq)
1184 spin_lock_bh(&aq->lock);
1185 __ap_flush_queue(aq);
1186 spin_unlock_bh(&aq->lock);
1188 EXPORT_SYMBOL(ap_flush_queue);
1190 void ap_queue_prepare_remove(struct ap_queue *aq)
1192 spin_lock_bh(&aq->lock);
1194 __ap_flush_queue(aq);
1195 /* move queue device state to SHUTDOWN in progress */
1196 aq->dev_state = AP_DEV_STATE_SHUTDOWN;
1197 spin_unlock_bh(&aq->lock);
1198 del_timer_sync(&aq->timeout);
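/*
 * Queue teardown is a two stage process: ap_queue_prepare_remove()
 * above flushes all requests and moves the device state to SHUTDOWN,
 * ap_queue_remove() below then issues the zeroize reset (ap_zapq) and
 * returns the device state to AP_DEV_STATE_UNINITIATED.
 */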
1201 void ap_queue_remove(struct ap_queue *aq)
1204 * All messages have been flushed and the device state
1205 * is SHUTDOWN. Now reset with zero, which also clears
1206 * the irq registration, and move the device state
1207 * to the initial value AP_DEV_STATE_UNINITIATED.
1209 spin_lock_bh(&aq->lock);
1210 ap_zapq(aq->qid, 0);
1211 aq->dev_state = AP_DEV_STATE_UNINITIATED;
1212 spin_unlock_bh(&aq->lock);
1215 void ap_queue_init_state(struct ap_queue *aq)
1217 spin_lock_bh(&aq->lock);
1218 aq->dev_state = AP_DEV_STATE_OPERATING;
1219 aq->sm_state = AP_SM_STATE_RESET_START;
1220 aq->last_err_rc = 0;
1221 aq->assoc_idx = ASSOC_IDX_INVALID;
1222 ap_wait(ap_sm_event(aq, AP_SM_EVENT_POLL));
1223 spin_unlock_bh(&aq->lock);
1225 EXPORT_SYMBOL(ap_queue_init_state);