// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2014 - 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "qat_bl.h"
#include "qat_crypto.h"
void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
		      struct qat_request_buffs *buf)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bl = buf->bl;
	struct qat_alg_buf_list *blout = buf->blout;
	dma_addr_t blp = buf->blp;
	dma_addr_t blpout = buf->bloutp;
	size_t sz = buf->sz;
	size_t sz_out = buf->sz_out;
	int bl_dma_dir;
	int i;

	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->buffers[i].addr,
				 bl->buffers[i].len, bl_dma_dir);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (!buf->sgl_src_valid)
		kfree(bl);

	if (blp != blpout) {
		for (i = 0; i < blout->num_mapped_bufs; i++) {
			dma_unmap_single(dev, blout->buffers[i].addr,
					 blout->buffers[i].len,
					 DMA_FROM_DEVICE);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

		if (!buf->sgl_dst_valid)
			kfree(blout);
	}
}
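
/*
 * Build the firmware buffer lists for a source/destination scatterlist
 * pair. Descriptors come from the lists pre-allocated in @buf when the
 * entry count fits within QAT_MAX_BUFF_DESC, otherwise from the heap.
 * The first @sskip bytes of the source and @dskip bytes of the
 * destination are skipped, and an optional @extra_dst_buff of
 * @sz_extra_dst_buff bytes is appended to the destination list.
 */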
static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
				struct scatterlist *sgl,
				struct scatterlist *sglout,
				struct qat_request_buffs *buf,
				dma_addr_t extra_dst_buff,
				size_t sz_extra_dst_buff,
				unsigned int sskip,
				unsigned int dskip,
				gfp_t flags)
{
	struct device *dev = &GET_DEV(accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, buffers, n);
	int node = dev_to_node(&GET_DEV(accel_dev));
	unsigned int left;
	int bufl_dma_dir;

	if (unlikely(!n))
		return -EINVAL;

	buf->sgl_src_valid = false;
	buf->sgl_dst_valid = false;

	if (n > QAT_MAX_BUFF_DESC) {
		bufl = kzalloc_node(sz, flags, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		bufl = container_of(&buf->sgl_src.sgl_hdr,
				    struct qat_alg_buf_list, hdr);
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		buf->sgl_src_valid = true;
	}

	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	left = sskip;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		if (left >= sg->length) {
			left -= sg->length;
			continue;
		}
		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
						       sg->length - left,
						       bufl_dma_dir);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_in;
		sg_nctr++;
		if (left) {
			bufl->buffers[y].len -= left;
			left = 0;
		}
	}
	bufl->num_bufs = sg_nctr;
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;
	buf->bl = bufl;
	buf->blp = blp;
	buf->sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *buffers;
		int extra_buff = extra_dst_buff ? 1 : 0;
		int n_sglout = sg_nents(sglout);

		n = n_sglout + extra_buff;
		sz_out = struct_size(buflout, buffers, n);
		left = dskip;

		sg_nctr = 0;

		if (n > QAT_MAX_BUFF_DESC) {
			buflout = kzalloc_node(sz_out, flags, node);
			if (unlikely(!buflout))
				goto err_in;
		} else {
			buflout = container_of(&buf->sgl_dst.sgl_hdr,
					       struct qat_alg_buf_list, hdr);
			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
			buf->sgl_dst_valid = true;
		}

		buffers = buflout->buffers;
		for (i = 0; i < n; i++)
			buffers[i].addr = DMA_MAPPING_ERROR;

		for_each_sg(sglout, sg, n_sglout, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			if (left >= sg->length) {
				left -= sg->length;
				continue;
			}
			buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
							 sg->length - left,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
				goto err_out;
			buffers[y].len = sg->length;
			sg_nctr++;
			if (left) {
				buffers[y].len -= left;
				left = 0;
			}
		}
		if (extra_buff) {
			buffers[sg_nctr].addr = extra_dst_buff;
			buffers[sg_nctr].len = sz_extra_dst_buff;
		}

		buflout->num_bufs = sg_nctr;
		buflout->num_bufs += extra_buff;
		buflout->num_mapped_bufs = sg_nctr;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		buf->blout = buflout;
		buf->bloutp = bloutp;
		buf->sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		buf->bloutp = buf->blp;
		buf->sz_out = 0;
	}
	return 0;
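
/* Error unwind: release whatever was mapped, most recent first. */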
err_out:
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

	n = sg_nents(sglout);
	for (i = 0; i < n; i++) {
		if (buflout->buffers[i].addr == extra_dst_buff)
			break;
		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
			dma_unmap_single(dev, buflout->buffers[i].addr,
					 buflout->buffers[i].len,
					 DMA_FROM_DEVICE);
	}

	if (!buf->sgl_dst_valid)
		kfree(buflout);

err_in:
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 bufl_dma_dir);

	if (!buf->sgl_src_valid)
		kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
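
/*
 * Public wrapper for __qat_bl_sgl_to_bufl(). @params may be NULL, in
 * which case nothing is skipped and no extra destination buffer is
 * appended.
 */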
int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
		       struct scatterlist *sgl,
		       struct scatterlist *sglout,
		       struct qat_request_buffs *buf,
		       struct qat_sgl_to_bufl_params *params,
		       gfp_t flags)
{
	dma_addr_t extra_dst_buff = 0;
	size_t sz_extra_dst_buff = 0;
	unsigned int sskip = 0;
	unsigned int dskip = 0;

	if (params) {
		extra_dst_buff = params->extra_dst_buff;
		sz_extra_dst_buff = params->sz_extra_dst_buff;
		sskip = params->sskip;
		dskip = params->dskip;
	}

	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
				    extra_dst_buff, sz_extra_dst_buff,
				    sskip, dskip, flags);
}
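
/* Unmap all successfully DMA-mapped entries of a buffer list. */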
static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
			     struct qat_alg_buf_list *bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	int n = bl->num_bufs;
	int i;

	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bl->buffers[i].addr))
			dma_unmap_single(dev, bl->buffers[i].addr,
					 bl->buffers[i].len, DMA_FROM_DEVICE);
}
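
/*
 * Allocate a buffer list for @sgl and DMA-map each non-empty entry for
 * device-to-memory transfers. On failure, any mappings already created
 * are released and the list is freed.
 */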
static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
			  struct scatterlist *sgl,
			  struct qat_alg_buf_list **bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bufl;
	int node = dev_to_node(dev);
	struct scatterlist *sg;
	int n, i, sg_nctr;
	size_t sz;

	n = sg_nents(sgl);
	sz = struct_size(bufl, buffers, n);
	bufl = kzalloc_node(sz, GFP_KERNEL, node);
	if (unlikely(!bufl))
		return -ENOMEM;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	sg_nctr = 0;
	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
						       sg->length,
						       DMA_FROM_DEVICE);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_map;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	bufl->num_mapped_bufs = sg_nctr;

	*bl = bufl;

	return 0;

err_map:
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 DMA_FROM_DEVICE);
	kfree(bufl);
	*bl = NULL;

	return -ENOMEM;
}
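
/*
 * Unmap a buffer list, optionally free it, then free the associated
 * scatterlist.
 */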
static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
				  struct scatterlist *sgl,
				  struct qat_alg_buf_list *bl,
				  bool free_bl)
{
	if (bl) {
		qat_bl_sgl_unmap(accel_dev, bl);

		if (free_bl)
			kfree(bl);
	}
	if (sgl)
		sgl_free(sgl);
}
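
/*
 * Allocate a scatterlist of @dlen bytes and DMA-map it into a new
 * buffer list. On success the caller owns both, returned via @sgl
 * and @bl.
 */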
static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
				struct scatterlist **sgl,
				struct qat_alg_buf_list **bl,
				unsigned int dlen,
				gfp_t gfp)
{
	struct scatterlist *dst;
	int ret;

	dst = sgl_alloc(dlen, gfp, NULL);
	if (!dst) {
		dev_err(&GET_DEV(accel_dev), "sg_alloc failed\n");
		return -ENOMEM;
	}

	ret = qat_bl_sgl_map(accel_dev, dst, bl);
	if (ret)
		goto err;

	*sgl = dst;

	return 0;

err:
	sgl_free(dst);
	return ret;
}
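
/*
 * Allocate and map a new destination scatterlist of @dlen bytes and
 * swap it into @qat_bufs. The old destination descriptor and
 * scatterlist are unmapped and freed only after the new ones have been
 * fully mapped.
 */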
int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
			       struct scatterlist **sg,
			       unsigned int dlen,
			       struct qat_request_buffs *qat_bufs,
			       gfp_t gfp)
{
	struct device *dev = &GET_DEV(accel_dev);
	dma_addr_t new_blp = DMA_MAPPING_ERROR;
	struct qat_alg_buf_list *new_bl;
	struct scatterlist *new_sg;
	size_t new_bl_size;
	int ret;

	ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
	if (ret)
		return ret;

	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);

	/* Map new firmware SGL descriptor */
	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, new_blp)))
		goto err;

	/* Unmap old firmware SGL descriptor */
	dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);

	/* Free and unmap old scatterlist */
	qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
			      !qat_bufs->sgl_dst_valid);

	qat_bufs->sgl_dst_valid = false;
	qat_bufs->blout = new_bl;
	qat_bufs->bloutp = new_blp;
	qat_bufs->sz_out = new_bl_size;

	*sg = new_sg;

	return 0;

err:
	qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);

	if (!dma_mapping_error(dev, new_blp))
		dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);

	return ret;
}