// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/crypto.h>
#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

enum direction {
	DECOMPRESSION = 0,
	COMPRESSION = 1,
};

struct qat_compression_req;
struct qat_compression_ctx {
	u8 comp_ctx[QAT_COMP_CTX_SIZE];
	struct qat_compression_instance *inst;
	int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};

/* State of the destination buffer for requests submitted with a NULL dst */
struct qat_dst {
	bool is_null;
	bool resubmitted;
};
struct qat_compression_req {
	u8 req[QAT_COMP_REQ_SIZE];
	struct qat_compression_ctx *qat_compression_ctx;
	struct acomp_req *acompress_req;
	struct qat_request_buffs buf;
	enum direction dir;
	int actual_dlen;
	struct qat_alg_req alg_req;
	struct work_struct resubmit;
	struct qat_dst dst;
};
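
/* Fill in the qat_alg_req embedded in the request and hand it to the
 * transport layer; a request that does not fit on the DC ring may be
 * parked on the instance backlog by qat_alg_send_message().
 */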
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
				   struct qat_compression_instance *inst,
				   struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->dc_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}
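
/* Worker invoked when a NULL dst decompression request overflowed the
 * guessed destination buffer: remap a CRYPTO_ACOMP_DST_MAX sized buffer
 * and resubmit the firmware request once.
 */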
static void qat_comp_resubmit(struct work_struct *work)
{
	struct qat_compression_req *qat_req =
		container_of(work, struct qat_compression_req, resubmit);
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct qat_request_buffs *qat_bufs = &qat_req->buf;
	struct qat_compression_instance *inst = ctx->inst;
	struct acomp_req *areq = qat_req->acompress_req;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
	u8 *req = qat_req->req;
	dma_addr_t dfbuf;
	int ret;

	areq->dlen = dlen;

	dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);

	ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
					 qat_algs_alloc_flags(&areq->base));
	if (ret)
		goto err;

	qat_req->dst.resubmitted = true;

	dfbuf = qat_req->buf.bloutp;
	qat_comp_override_dst(req, dfbuf, dlen);

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret != -ENOSPC)
		return;

err:
	qat_bl_free_bufl(accel_dev, qat_bufs);
	acomp_request_complete(areq, ret);
}
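
/* Common completion path: decode the firmware response, translate it to
 * an errno, fix up areq->dlen and complete the acomp request.
 */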
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
				      void *resp)
{
	struct acomp_req *areq = qat_req->acompress_req;
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	struct qat_compression_instance *inst = ctx->inst;
	int consumed, produced;
	s8 cmp_err, xlt_err;
	int res = -EBADMSG;
	int status;
	u8 cnv;

	status = qat_comp_get_cmp_status(resp);
	status |= qat_comp_get_xlt_status(resp);
	cmp_err = qat_comp_get_cmp_err(resp);
	xlt_err = qat_comp_get_xlt_err(resp);

	consumed = qat_comp_get_consumed_ctr(resp);
	produced = qat_comp_get_produced_ctr(resp);

	dev_dbg(&GET_DEV(accel_dev),
		"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp  " : "decomp",
		status ? "ERR" : "OK ",
		areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);

	areq->dlen = 0;

	if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
		if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
			if (qat_req->dst.resubmitted) {
				dev_dbg(&GET_DEV(accel_dev),
					"Output does not fit destination buffer\n");
				res = -EOVERFLOW;
				goto end;
			}

			INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
			adf_misc_wq_queue_work(&qat_req->resubmit);
			return;
		}
	}

	if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		goto end;

	if (qat_req->dir == COMPRESSION) {
		cnv = qat_comp_get_cmp_cnv_flag(resp);
		if (unlikely(!cnv)) {
			dev_err(&GET_DEV(accel_dev),
				"Verified compression not supported\n");
			goto end;
		}

		if (unlikely(produced > qat_req->actual_dlen)) {
			memset(inst->dc_data->ovf_buff, 0,
			       inst->dc_data->ovf_buff_sz);
			dev_dbg(&GET_DEV(accel_dev),
				"Actual buffer overflow: produced=%d, dlen=%d\n",
				produced, qat_req->actual_dlen);
			goto end;
		}
	}

	res = 0;
	areq->dlen = produced;

	if (ctx->qat_comp_callback)
		res = ctx->qat_comp_callback(qat_req, resp);

end:
	qat_bl_free_bufl(accel_dev, &qat_req->buf);
	acomp_request_complete(areq, res);
}
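
/* Response handler registered with the DC service: completes the request
 * and then tries to drain the instance backlog.
 */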
void qat_comp_alg_callback(void *resp)
{
	struct qat_compression_req *qat_req =
		(void *)(__force long)qat_comp_get_opaque(resp);
	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

	qat_comp_generic_callback(qat_req, resp);

	qat_alg_send_backlog(backlog);
}
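
/* Bind the transform to a compression instance on (or near) the NUMA node
 * the tfm was allocated for and build the firmware deflate config.
 */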
static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst;
	int node;

	if (tfm->node == NUMA_NO_NODE)
		node = numa_node_id();
	else
		node = tfm->node;

	memset(ctx, 0, sizeof(*ctx));
	inst = qat_compression_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	ctx->inst = inst;

	ctx->inst->build_deflate_ctx(ctx->comp_ctx);

	return 0;
}

static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);

	qat_compression_put_instance(ctx->inst);
	memset(ctx, 0, sizeof(*ctx));
}
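
/* Common implementation for compress and decompress: map the src/dst
 * scatterlists, build the firmware request and post it to the DC ring.
 * A NULL dst is replaced with a buffer of twice the source size, grown
 * on overflow by qat_comp_resubmit(). shdr/sftr and dhdr/dftr let
 * callers trim header and footer bytes off the source and destination.
 */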
static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
					    unsigned int shdr, unsigned int sftr,
					    unsigned int dhdr, unsigned int dftr)
{
	struct qat_compression_req *qat_req = acomp_request_ctx(areq);
	struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst = ctx->inst;
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	struct qat_sgl_to_bufl_params params = {0};
	int slen = areq->slen - shdr - sftr;
	int dlen = areq->dlen - dhdr - dftr;
	dma_addr_t sfbuf, dfbuf;
	u8 *req = qat_req->req;
	size_t ovf_buff_sz;
	int ret;

	params.sskip = shdr;
	params.dskip = dhdr;

	if (!areq->src || !slen)
		return -EINVAL;

	if (areq->dst && !dlen)
		return -EINVAL;

	qat_req->dst.is_null = false;

	/* Handle acomp requests that require the allocation of a destination
	 * buffer. The size of the destination buffer is double the source
	 * buffer (rounded up to the size of a page) to fit the decompressed
	 * output or an expansion on the data for compression.
	 */
	if (!areq->dst) {
		qat_req->dst.is_null = true;

		dlen = round_up(2 * slen, PAGE_SIZE);
		areq->dst = sgl_alloc(dlen, f, NULL);
		if (!areq->dst)
			return -ENOMEM;

		dlen -= dhdr + dftr;
		areq->dlen = dlen;
		qat_req->dst.resubmitted = false;
	}

	if (dir == COMPRESSION) {
		params.extra_dst_buff = inst->dc_data->ovf_buff_p;
		ovf_buff_sz = inst->dc_data->ovf_buff_sz;
		params.sz_extra_dst_buff = ovf_buff_sz;
	}

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, &params, f);
	if (unlikely(ret))
		return ret;

	sfbuf = qat_req->buf.blp;
	dfbuf = qat_req->buf.bloutp;
	qat_req->qat_compression_ctx = ctx;
	qat_req->acompress_req = areq;
	qat_req->dir = dir;

	if (dir == COMPRESSION) {
		qat_req->actual_dlen = dlen;
		dlen += ovf_buff_sz;
		qat_comp_create_compression_req(ctx->comp_ctx, req,
						(u64)(__force long)sfbuf, slen,
						(u64)(__force long)dfbuf, dlen,
						(u64)(__force long)qat_req);
	} else {
		qat_comp_create_decompression_req(ctx->comp_ctx, req,
						  (u64)(__force long)sfbuf, slen,
						  (u64)(__force long)dfbuf, dlen,
						  (u64)(__force long)qat_req);
	}

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);

	return ret;
}
static int qat_comp_alg_compress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}

static int qat_comp_alg_decompress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}
static struct acomp_alg qat_acomp[] = { {
	.base = {
		.cra_name = "deflate",
		.cra_driver_name = "qat_deflate",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct qat_compression_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_comp_alg_init_tfm,
	.exit = qat_comp_alg_exit_tfm,
	.compress = qat_comp_alg_compress,
	.decompress = qat_comp_alg_decompress,
	.dst_free = sgl_free,
	.reqsize = sizeof(struct qat_compression_req),
} };
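
/* active_devs refcounts accel devices under algs_lock: the algorithms are
 * registered when the first device comes up and unregistered when the
 * last one goes away.
 */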
int qat_comp_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1)
		ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
	return ret;
}
void qat_comp_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0)
		crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
}