// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/crypto.h>
#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"

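/* active_devs counts the accel devices sharing the registered algorithms;
 * algs_lock serializes updates to it across (un)registration calls.
 */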
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

enum direction {
        DECOMPRESSION = 0,
        COMPRESSION = 1,
};

struct qat_compression_req;

struct qat_compression_ctx {
        u8 comp_ctx[QAT_COMP_CTX_SIZE];
        struct qat_compression_instance *inst;
        int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};

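/* State for requests submitted without a destination buffer: is_null marks
 * a driver-allocated dst and resubmitted guards the single retry performed
 * when decompression overflows it.
 */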
struct qat_dst {
        bool is_null;
        bool resubmitted;
};

struct qat_compression_req {
        u8 req[QAT_COMP_REQ_SIZE];
        struct qat_compression_ctx *qat_compression_ctx;
        struct acomp_req *acompress_req;
        struct qat_request_buffs buf;
        enum direction dir;
        int actual_dlen;
        struct qat_alg_req alg_req;
        struct work_struct resubmit;
        struct qat_dst dst;
};

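/* Build the transport descriptor for a firmware request and enqueue it on
 * the instance's compression TX ring (or its backlog).
 */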
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
                                   struct qat_compression_instance *inst,
                                   struct crypto_async_request *base)
{
        struct qat_alg_req *alg_req = &qat_req->alg_req;

        alg_req->fw_req = (u32 *)&qat_req->req;
        alg_req->tx_ring = inst->dc_tx;
        alg_req->base = base;
        alg_req->backlog = &inst->backlog;

        return qat_alg_send_message(alg_req);
}

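/* Retry, from a workqueue, a NULL dst decompression request that overflowed
 * its driver-allocated destination: remap a CRYPTO_ACOMP_DST_MAX buffer,
 * patch it into the firmware request and resend.
 */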
static void qat_comp_resubmit(struct work_struct *work)
{
        struct qat_compression_req *qat_req =
                container_of(work, struct qat_compression_req, resubmit);
        struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
        struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
        struct qat_request_buffs *qat_bufs = &qat_req->buf;
        struct qat_compression_instance *inst = ctx->inst;
        struct acomp_req *areq = qat_req->acompress_req;
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
        unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
        u8 *req = qat_req->req;
        dma_addr_t dfbuf;
        int ret;

        areq->dlen = dlen;

        dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
                crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
                qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);

        ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
                                         qat_algs_alloc_flags(&areq->base));
        if (ret)
                goto err;

        qat_req->dst.resubmitted = true;

        dfbuf = qat_req->buf.bloutp;
        qat_comp_override_dst(req, dfbuf, dlen);

        ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
        if (ret != -ENOSPC)
                return;

err:
        qat_bl_free_bufl(accel_dev, qat_bufs);
        acomp_request_complete(areq, ret);
}

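/* Decode a firmware response and complete the matching acomp request.
 * Overflowed NULL dst decompressions are bounced once to the resubmit
 * worker; compressed output is accepted only if the firmware verified it
 * (CnV) and it fits within the caller-visible destination length.
 */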
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
                                      void *resp)
{
        struct acomp_req *areq = qat_req->acompress_req;
        struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
        struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
        struct qat_compression_instance *inst = ctx->inst;
        int consumed, produced;
        s8 cmp_err, xlt_err;
        int res = -EBADMSG;
        int status;
        u8 cnv;

        status = qat_comp_get_cmp_status(resp);
        status |= qat_comp_get_xlt_status(resp);
        cmp_err = qat_comp_get_cmp_err(resp);
        xlt_err = qat_comp_get_xlt_err(resp);

        consumed = qat_comp_get_consumed_ctr(resp);
        produced = qat_comp_get_produced_ctr(resp);

        dev_dbg(&GET_DEV(accel_dev),
                "[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d\n",
                crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
                qat_req->dir == COMPRESSION ? "comp  " : "decomp",
                status ? "ERR" : "OK ",
                areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);

        areq->dlen = 0;

        if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
                if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
                        if (qat_req->dst.resubmitted) {
                                dev_dbg(&GET_DEV(accel_dev),
                                        "Output does not fit destination buffer\n");
                                res = -EOVERFLOW;
                                goto end;
                        }

                        INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
                        adf_misc_wq_queue_work(&qat_req->resubmit);
                        return;
                }
        }

        if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
                goto end;

        if (qat_req->dir == COMPRESSION) {
                cnv = qat_comp_get_cmp_cnv_flag(resp);
                if (unlikely(!cnv)) {
                        dev_err(&GET_DEV(accel_dev),
                                "Verified compression not supported\n");
                        goto end;
                }

                if (unlikely(produced > qat_req->actual_dlen)) {
                        memset(inst->dc_data->ovf_buff, 0,
                               inst->dc_data->ovf_buff_sz);
                        dev_dbg(&GET_DEV(accel_dev),
                                "Actual buffer overflow: produced=%d, dlen=%d\n",
                                produced, qat_req->actual_dlen);
                        goto end;
                }
        }

        res = 0;
        areq->dlen = produced;

        if (ctx->qat_comp_callback)
                res = ctx->qat_comp_callback(qat_req, resp);

end:
        qat_bl_free_bufl(accel_dev, &qat_req->buf);
        acomp_request_complete(areq, res);
}

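/* Ring callback: the originating request travels in the response's opaque
 * field. Complete it, then give backlogged requests a chance to be sent.
 */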
void qat_comp_alg_callback(void *resp)
{
        struct qat_compression_req *qat_req =
                        (void *)(__force long)qat_comp_get_opaque(resp);
        struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

        qat_comp_generic_callback(qat_req, resp);

        qat_alg_send_backlog(backlog);
}

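/* Pick a compression instance close to the transform's NUMA node and
 * initialize the firmware context for deflate.
 */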
static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
{
        struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
        struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_compression_instance *inst;
        int node;

        if (tfm->node == NUMA_NO_NODE)
                node = numa_node_id();
        else
                node = tfm->node;

        memset(ctx, 0, sizeof(*ctx));
        inst = qat_compression_get_instance_node(node);
        if (!inst)
                return -EINVAL;
        ctx->inst = inst;

        ctx->inst->build_deflate_ctx(ctx->comp_ctx);

        return 0;
}

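/* Release the compression instance taken at init time */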
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
        struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
        struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);

        qat_compression_put_instance(ctx->inst);
        memset(ctx, 0, sizeof(*ctx));
}

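/* Common (de)compression path. shdr/sftr and dhdr/dftr are the number of
 * source and destination header/footer bytes to skip when mapping the
 * scatterlists; both entry points below pass zero for all of them.
 */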
static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
                                            unsigned int shdr, unsigned int sftr,
                                            unsigned int dhdr, unsigned int dftr)
{
        struct qat_compression_req *qat_req = acomp_request_ctx(areq);
        struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
        struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
        struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
        struct qat_compression_instance *inst = ctx->inst;
        gfp_t f = qat_algs_alloc_flags(&areq->base);
        struct qat_sgl_to_bufl_params params = {0};
        int slen = areq->slen - shdr - sftr;
        int dlen = areq->dlen - dhdr - dftr;
        dma_addr_t sfbuf, dfbuf;
        u8 *req = qat_req->req;
        size_t ovf_buff_sz;
        int ret;

        params.sskip = shdr;
        params.dskip = dhdr;

        if (!areq->src || !slen)
                return -EINVAL;

        if (areq->dst && !dlen)
                return -EINVAL;

        qat_req->dst.is_null = false;

        /* Handle acomp requests that require the allocation of a destination
         * buffer. The size of the destination buffer is double the source
         * buffer (rounded up to the size of a page) to fit the decompressed
         * output or an expansion on the data for compression.
         */
        if (!areq->dst) {
                qat_req->dst.is_null = true;

                dlen = round_up(2 * slen, PAGE_SIZE);
                areq->dst = sgl_alloc(dlen, f, NULL);
                if (!areq->dst)
                        return -ENOMEM;

                dlen -= dhdr + dftr;
                areq->dlen = dlen;
                qat_req->dst.resubmitted = false;
        }

        if (dir == COMPRESSION) {
                params.extra_dst_buff = inst->dc_data->ovf_buff_p;
                ovf_buff_sz = inst->dc_data->ovf_buff_sz;
                params.sz_extra_dst_buff = ovf_buff_sz;
        }

        ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
                                 &qat_req->buf, &params, f);
        if (unlikely(ret))
                return ret;

        sfbuf = qat_req->buf.blp;
        dfbuf = qat_req->buf.bloutp;
        qat_req->qat_compression_ctx = ctx;
        qat_req->acompress_req = areq;
        qat_req->dir = dir;

        if (dir == COMPRESSION) {
                qat_req->actual_dlen = dlen;
                dlen += ovf_buff_sz;
                qat_comp_create_compression_req(ctx->comp_ctx, req,
                                                (u64)(__force long)sfbuf, slen,
                                                (u64)(__force long)dfbuf, dlen,
                                                (u64)(__force long)qat_req);
        } else {
                qat_comp_create_decompression_req(ctx->comp_ctx, req,
                                                  (u64)(__force long)sfbuf, slen,
                                                  (u64)(__force long)dfbuf, dlen,
                                                  (u64)(__force long)qat_req);
        }

        ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
        if (ret == -ENOSPC)
                qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);

        return ret;
}

static int qat_comp_alg_compress(struct acomp_req *req)
{
        return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}

static int qat_comp_alg_decompress(struct acomp_req *req)
{
        return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}

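/* QAT deflate, registered at high priority so it is preferred over
 * lower-priority "deflate" implementations.
 */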
static struct acomp_alg qat_acomp[] = { {
        .base = {
                .cra_name = "deflate",
                .cra_driver_name = "qat_deflate",
                .cra_priority = 4001,
                .cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
                .cra_ctxsize = sizeof(struct qat_compression_ctx),
                .cra_module = THIS_MODULE,
        },
        .init = qat_comp_alg_init_tfm,
        .exit = qat_comp_alg_exit_tfm,
        .compress = qat_comp_alg_compress,
        .decompress = qat_comp_alg_decompress,
        .dst_free = sgl_free,
        .reqsize = sizeof(struct qat_compression_req),
}};

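/* Register the algorithms when the first device comes up; subsequent
 * devices only bump the refcount.
 */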
int qat_comp_algs_register(void)
{
        int ret = 0;

        mutex_lock(&algs_lock);
        if (++active_devs == 1)
                ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
        mutex_unlock(&algs_lock);
        return ret;
}

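/* Drop a device reference; the last one unregisters the algorithms */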
void qat_comp_algs_unregister(void)
{
        mutex_lock(&algs_lock);
        if (--active_devs == 0)
                crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
        mutex_unlock(&algs_lock);
}