// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2014 - 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "qat_bl.h"
#include "qat_crypto.h"
void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
		      struct qat_request_buffs *buf)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bl = buf->bl;
	struct qat_alg_buf_list *blout = buf->blout;
	dma_addr_t blp = buf->blp;
	dma_addr_t blpout = buf->bloutp;
	size_t sz = buf->sz;
	size_t sz_out = buf->sz_out;
	int bl_dma_dir;
	int i;

	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->buffers[i].addr,
				 bl->buffers[i].len, bl_dma_dir);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (!buf->sgl_src_valid)
		kfree(bl);

	if (blp != blpout) {
		for (i = 0; i < blout->num_mapped_bufs; i++) {
			dma_unmap_single(dev, blout->buffers[i].addr,
					 blout->buffers[i].len,
					 DMA_FROM_DEVICE);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

		if (!buf->sgl_dst_valid)
			kfree(blout);
	}
}
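
/*
 * Build the firmware buffer-list descriptors for a request: DMA-map every
 * non-empty source (and, for out-of-place requests, destination) scatterlist
 * entry, then map the descriptor tables themselves.  Small lists reuse the
 * space pre-allocated in struct qat_request_buffs; larger ones are allocated
 * on the device's NUMA node.  sskip/dskip bytes at the start of the source
 * and destination data are left unmapped.
 */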
static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
				struct scatterlist *sgl,
				struct scatterlist *sglout,
				struct qat_request_buffs *buf,
				dma_addr_t extra_dst_buff,
				size_t sz_extra_dst_buff,
				unsigned int sskip,
				unsigned int dskip,
				gfp_t flags)
{
	struct device *dev = &GET_DEV(accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, buffers, n);
	int node = dev_to_node(&GET_DEV(accel_dev));
	unsigned int left;
	int bufl_dma_dir;

	if (unlikely(!n))
		return -EINVAL;

	buf->sgl_src_valid = false;
	buf->sgl_dst_valid = false;

	if (n > QAT_MAX_BUFF_DESC) {
		bufl = kzalloc_node(sz, flags, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		bufl = &buf->sgl_src.sgl_hdr;
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		buf->sgl_src_valid = true;
	}

	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	left = sskip;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		if (left >= sg->length) {
			left -= sg->length;
			continue;
		}
		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
						       sg->length - left,
						       bufl_dma_dir);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_in;
		sg_nctr++;
		if (left) {
			bufl->buffers[y].len -= left;
			left = 0;
		}
	}
	bufl->num_bufs = sg_nctr;
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;
	buf->bl = bufl;
	buf->blp = blp;
	buf->sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *buffers;
		int extra_buff = extra_dst_buff ? 1 : 0;
		int n_sglout = sg_nents(sglout);

		n = n_sglout + extra_buff;
		sz_out = struct_size(buflout, buffers, n);
		left = dskip;
		sg_nctr = 0;

		if (n > QAT_MAX_BUFF_DESC) {
			buflout = kzalloc_node(sz_out, flags, node);
			if (unlikely(!buflout))
				goto err_in;
		} else {
			buflout = &buf->sgl_dst.sgl_hdr;
			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
			buf->sgl_dst_valid = true;
		}

		buffers = buflout->buffers;
		for (i = 0; i < n; i++)
			buffers[i].addr = DMA_MAPPING_ERROR;

		for_each_sg(sglout, sg, n_sglout, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			if (left >= sg->length) {
				left -= sg->length;
				continue;
			}
			buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
							 sg->length - left,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
				goto err_out;
			buffers[y].len = sg->length;
			sg_nctr++;
			if (left) {
				buffers[y].len -= left;
				left = 0;
			}
		}
		if (extra_buff) {
			buffers[sg_nctr].addr = extra_dst_buff;
			buffers[sg_nctr].len = sz_extra_dst_buff;
		}

		buflout->num_bufs = sg_nctr;
		buflout->num_bufs += extra_buff;
		buflout->num_mapped_bufs = sg_nctr;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		buf->blout = buflout;
		buf->bloutp = bloutp;
		buf->sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		buf->bloutp = buf->blp;
		buf->sz_out = 0;
	}
	return 0;

err_out:
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

	n = sg_nents(sglout);
	for (i = 0; i < n; i++) {
		if (buflout->buffers[i].addr == extra_dst_buff)
			break;
		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
			dma_unmap_single(dev, buflout->buffers[i].addr,
					 buflout->buffers[i].len,
					 DMA_FROM_DEVICE);
	}

	if (!buf->sgl_dst_valid)
		kfree(buflout);

err_in:
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 bufl_dma_dir);

	if (!buf->sgl_src_valid)
		kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
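
/*
 * Public wrapper around __qat_bl_sgl_to_bufl().  Callers that do not need an
 * extra destination buffer or skip offsets may pass params as NULL and get
 * the default (zeroed) values.
 */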
int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
		       struct scatterlist *sgl,
		       struct scatterlist *sglout,
		       struct qat_request_buffs *buf,
		       struct qat_sgl_to_bufl_params *params,
		       gfp_t flags)
{
	dma_addr_t extra_dst_buff = 0;
	size_t sz_extra_dst_buff = 0;
	unsigned int sskip = 0;
	unsigned int dskip = 0;

	if (params) {
		extra_dst_buff = params->extra_dst_buff;
		sz_extra_dst_buff = params->sz_extra_dst_buff;
		sskip = params->sskip;
		dskip = params->dskip;
	}

	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
				    extra_dst_buff, sz_extra_dst_buff,
				    sskip, dskip, flags);
}
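
/*
 * Undo the per-buffer DMA mappings of a destination buffer list.  Entries
 * that were never mapped still hold DMA_MAPPING_ERROR and are skipped.
 */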
static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
			     struct qat_alg_buf_list *bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	int n = bl->num_bufs;
	int i;

	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bl->buffers[i].addr))
			dma_unmap_single(dev, bl->buffers[i].addr,
					 bl->buffers[i].len, DMA_FROM_DEVICE);
}
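
/*
 * Allocate a buffer list sized for the given scatterlist and DMA-map every
 * non-empty entry for device-to-host (DMA_FROM_DEVICE) transfers.  On failure
 * all mappings created so far are rolled back and the list is freed.
 */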
static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
			  struct scatterlist *sgl,
			  struct qat_alg_buf_list **bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bufl;
	int node = dev_to_node(dev);
	struct scatterlist *sg;
	int n, i, sg_nctr;
	size_t sz;

	n = sg_nents(sgl);
	sz = struct_size(bufl, buffers, n);
	bufl = kzalloc_node(sz, GFP_KERNEL, node);
	if (unlikely(!bufl))
		return -ENOMEM;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	sg_nctr = 0;
	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
						       sg->length,
						       DMA_FROM_DEVICE);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_map;
		sg_nctr++;
	}

	bufl->num_bufs = sg_nctr;
	bufl->num_mapped_bufs = sg_nctr;
	*bl = bufl;

	return 0;

err_map:
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 DMA_FROM_DEVICE);
	kfree(bufl);
	*bl = NULL;

	return -ENOMEM;
}
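
/*
 * Tear down a destination scatterlist and its buffer list: unmap the buffers,
 * free the list if the caller owns it (free_bl) and release the scatterlist.
 */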
static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
				  struct scatterlist *sgl,
				  struct qat_alg_buf_list *bl,
				  bool free_bl)
{
	if (bl) {
		qat_bl_sgl_unmap(accel_dev, bl);

		if (free_bl)
			kfree(bl);
	}
	if (sgl)
		sgl_free(sgl);
}
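
/*
 * Allocate a new destination scatterlist of dlen bytes with sgl_alloc() and
 * DMA-map it via qat_bl_sgl_map().  On success both the scatterlist and the
 * buffer list are returned to the caller.
 */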
static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
				struct scatterlist **sgl,
				struct qat_alg_buf_list **bl,
				unsigned int dlen,
				gfp_t gfp)
{
	struct scatterlist *dst;
	int ret;

	dst = sgl_alloc(dlen, gfp, NULL);
	if (!dst) {
		dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n");
		return -ENOMEM;
	}

	ret = qat_bl_sgl_map(accel_dev, dst, bl);
	if (ret)
		goto err;

	*sgl = dst;

	return 0;

err:
	sgl_free(dst);
	*sgl = NULL;
	return ret;
}
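
/*
 * Allocate and map a new destination scatterlist of dlen bytes and swap it
 * into an existing request, unmapping and freeing the previous destination
 * descriptor and scatterlist only after the new ones are in place.
 */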
int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
			       struct scatterlist **sg,
			       unsigned int dlen,
			       struct qat_request_buffs *qat_bufs,
			       gfp_t gfp)
{
	struct device *dev = &GET_DEV(accel_dev);
	dma_addr_t new_blp = DMA_MAPPING_ERROR;
	struct qat_alg_buf_list *new_bl;
	struct scatterlist *new_sg;
	size_t new_bl_size;
	int ret;

	ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
	if (ret)
		return ret;

	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);

	/* Map new firmware SGL descriptor */
	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, new_blp)))
		goto err;

	/* Unmap old firmware SGL descriptor */
	dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);

	/* Free and unmap old scatterlist */
	qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
			      !qat_bufs->sgl_dst_valid);

	qat_bufs->sgl_dst_valid = false;
	qat_bufs->blout = new_bl;
	qat_bufs->bloutp = new_blp;
	qat_bufs->sz_out = new_bl_size;

	*sg = new_sg;

	return 0;

err:
	qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);

	if (!dma_mapping_error(dev, new_blp))
		dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);

	return ret;
}