drivers/crypto/intel/qat/qat_common/qat_algs_send.c
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2022 Intel Corporation */
#include <crypto/algapi.h>
#include "adf_transport.h"
#include "qat_algs_send.h"
#include "qat_crypto.h"

#define ADF_MAX_RETRIES         20

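/*
 * Retry-based send path, used for requests that must not be backlogged.
 * The ring put is retried up to ADF_MAX_RETRIES times while the ring is
 * full (-EAGAIN); if it is still full the request is dropped and -ENOSPC
 * is returned. Otherwise the request is in flight, hence -EINPROGRESS.
 */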
static int qat_alg_send_message_retry(struct qat_alg_req *req)
{
        int ret = 0, ctr = 0;

        do {
                ret = adf_send_message(req->tx_ring, req->fw_req);
        } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES);

        if (ret == -EAGAIN)
                return -ENOSPC;

        return -EINPROGRESS;
}

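/*
 * Resubmit backlogged requests to the HW ring in FIFO order, stopping as
 * soon as the ring is full again. Invoked from completion callbacks once
 * ring space may have been freed. Each request moved onto the ring is
 * completed with -EINPROGRESS so its originator learns it has left the
 * backlog.
 */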
void qat_alg_send_backlog(struct qat_instance_backlog *backlog)
{
        struct qat_alg_req *req, *tmp;

        spin_lock_bh(&backlog->lock);
        list_for_each_entry_safe(req, tmp, &backlog->list, list) {
                if (adf_send_message(req->tx_ring, req->fw_req)) {
                        /* The HW ring is full. Do nothing.
                         * qat_alg_send_backlog() will be invoked again by
                         * another callback.
                         */
                        break;
                }
                list_del(&req->list);
                crypto_request_complete(req->base, -EINPROGRESS);
        }
        spin_unlock_bh(&backlog->lock);
}

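/*
 * Try to put a request directly on the HW ring. Returns false if other
 * requests are already backlogged, if the ring is nearly full, or if the
 * ring put itself fails; in those cases the caller is expected to backlog
 * the request instead.
 */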
static bool qat_alg_try_enqueue(struct qat_alg_req *req)
{
        struct qat_instance_backlog *backlog = req->backlog;
        struct adf_etr_ring_data *tx_ring = req->tx_ring;
        u32 *fw_req = req->fw_req;

        /* Check if any request is already backlogged */
        if (!list_empty(&backlog->list))
                return false;

        /* Check if ring is nearly full */
        if (adf_ring_nearly_full(tx_ring))
                return false;

        /* Try to enqueue to HW ring */
        if (adf_send_message(tx_ring, fw_req))
                return false;

        return true;
}

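/*
 * Backlog-capable send path. A lockless fast-path enqueue is attempted
 * first; on failure the attempt is repeated under the backlog lock and,
 * if the ring is still unavailable, the request is appended to the
 * backlog list and -EBUSY is returned. Re-checking under the lock ensures
 * a request cannot be stranded on the list if the backlog was drained
 * concurrently.
 */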
static int qat_alg_send_message_maybacklog(struct qat_alg_req *req)
{
        struct qat_instance_backlog *backlog = req->backlog;
        int ret = -EINPROGRESS;

        if (qat_alg_try_enqueue(req))
                return ret;

        spin_lock_bh(&backlog->lock);
        if (!qat_alg_try_enqueue(req)) {
                list_add_tail(&req->list, &backlog->list);
                ret = -EBUSY;
        }
        spin_unlock_bh(&backlog->lock);

        return ret;
}

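/*
 * Send a firmware request to the HW ring, honouring the crypto API
 * CRYPTO_TFM_REQ_MAY_BACKLOG flag: backlog-capable requests may be queued
 * and return -EBUSY, all others are retried and fail with -ENOSPC if the
 * ring stays full.
 */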
int qat_alg_send_message(struct qat_alg_req *req)
{
        u32 flags = req->base->flags;

        if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
                return qat_alg_send_message_maybacklog(req);
        else
                return qat_alg_send_message_retry(req);
}
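
/*
 * Typical caller pattern (illustrative sketch only, not part of this file;
 * the real users are the QAT algorithm implementations that build a
 * struct qat_alg_req with tx_ring, fw_req, backlog and base set):
 *
 *	ret = qat_alg_send_message(req);
 *
 *	-EINPROGRESS: the request was placed on the HW ring.
 *	-EBUSY:       the request was backlogged; it is resubmitted later by
 *	              qat_alg_send_backlog() and completed with -EINPROGRESS
 *	              once it reaches the ring.
 *	-ENOSPC:      the ring stayed full and the request was not queued.
 */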