// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <crypto/internal/rsa.h>
#include <crypto/internal/akcipher.h>
#include <crypto/akcipher.h>
#include <crypto/kpp.h>
#include <crypto/internal/kpp.h>
#include <crypto/dh.h>
#include <linux/dma-mapping.h>
#include <linux/fips.h>
#include <crypto/scatterwalk.h>
#include "icp_qat_fw_pke.h"
#include "adf_accel_devices.h"
#include "qat_algs_send.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
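
/*
 * The parameter structs below mirror the flat input/output tables consumed
 * by the PKE firmware: each table is an array of eight 64-bit DMA addresses,
 * and the slot after the last used entry is zeroed before the table itself
 * is DMA-mapped and handed to the device.
 */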
struct qat_rsa_input_params {
	union {
		struct {
			dma_addr_t m;
			dma_addr_t e;
			dma_addr_t n;
		} enc;
		struct {
			dma_addr_t c;
			dma_addr_t d;
			dma_addr_t n;
		} dec;
		struct {
			dma_addr_t c;
			dma_addr_t p;
			dma_addr_t q;
			dma_addr_t dp;
			dma_addr_t dq;
			dma_addr_t qinv;
		} dec_crt;
		u64 in_tab[8];
	};
} __packed __aligned(64);
struct qat_rsa_output_params {
	union {
		struct {
			dma_addr_t c;
		} enc;
		struct {
			dma_addr_t m;
		} dec;
		u64 out_tab[8];
	};
} __packed __aligned(64);
struct qat_rsa_ctx {
	char *n;
	char *e;
	char *d;
	char *p;
	char *q;
	char *dp;
	char *dq;
	char *qinv;
	dma_addr_t dma_n;
	dma_addr_t dma_e;
	dma_addr_t dma_d;
	dma_addr_t dma_p;
	dma_addr_t dma_q;
	dma_addr_t dma_dp;
	dma_addr_t dma_dq;
	dma_addr_t dma_qinv;
	unsigned int key_sz;
	bool crt_mode;
	struct qat_crypto_instance *inst;
} __packed __aligned(64);
struct qat_dh_input_params {
	union {
		struct {
			dma_addr_t b;
			dma_addr_t xa;
			dma_addr_t p;
		} in;
		struct {
			dma_addr_t xa;
			dma_addr_t p;
		} in_g2;
		u64 in_tab[8];
	};
} __packed __aligned(64);
struct qat_dh_output_params {
	union {
		dma_addr_t r;
		u64 out_tab[8];
	};
} __packed __aligned(64);
struct qat_dh_ctx {
	char *g;
	char *xa;
	char *p;
	dma_addr_t dma_g;
	dma_addr_t dma_xa;
	dma_addr_t dma_p;
	unsigned int p_size;
	bool g2;
	struct qat_crypto_instance *inst;
	struct crypto_kpp *ftfm;
	bool fallback;
} __packed __aligned(64);
struct qat_asym_request {
	union {
		struct qat_rsa_input_params rsa;
		struct qat_dh_input_params dh;
	} in;
	union {
		struct qat_rsa_output_params rsa;
		struct qat_dh_output_params dh;
	} out;
	dma_addr_t phy_in;
	dma_addr_t phy_out;
	char *src_align;
	char *dst_align;
	struct icp_qat_fw_pke_request req;
	union {
		struct qat_rsa_ctx *rsa;
		struct qat_dh_ctx *dh;
	} ctx;
	union {
		struct akcipher_request *rsa;
		struct kpp_request *dh;
	} areq;
	int err;
	void (*cb)(struct icp_qat_fw_pke_resp *resp);
	struct qat_alg_req alg_req;
} __aligned(64);
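
/*
 * Package the firmware descriptor into a qat_alg_req and hand it to the
 * common send path, which enqueues it on the instance's PKE TX ring and
 * may park the request on the backlog when the ring is full.
 */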
static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
				     struct qat_crypto_instance *inst,
				     struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->pke_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}
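
/*
 * Completion handler for DH requests: decode the firmware status, undo the
 * DMA mappings set up in qat_dh_compute_value(), copy the result out of the
 * bounce buffer if one was used, and complete the kpp request.
 */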
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct kpp_request *areq = req->areq.dh;
	struct device *dev = &GET_DEV(req->ctx.dh->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	if (areq->src) {
		dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size,
				 DMA_TO_DEVICE);
		kfree_sensitive(req->src_align);
	}

	areq->dst_len = req->ctx.dh->p_size;
	dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size,
			 DMA_FROM_DEVICE);
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);
		kfree_sensitive(req->dst_align);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_dh_output_params),
			 DMA_TO_DEVICE);

	kpp_request_complete(areq, err);
}
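
/*
 * Firmware function IDs for the modular exponentiation services used for
 * DH, one per supported modulus size, with dedicated variants for the
 * common generator g = 2.
 */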
#define PKE_DH_1536 0x390c1a49
#define PKE_DH_G2_1536 0x2e0b1a3e
#define PKE_DH_2048 0x4d0c1a60
#define PKE_DH_G2_2048 0x3e0b1a55
#define PKE_DH_3072 0x510c1a77
#define PKE_DH_G2_3072 0x3a0b1a6c
#define PKE_DH_4096 0x690c1a8e
#define PKE_DH_G2_4096 0x4a0b1a83
static unsigned long qat_dh_fn_id(unsigned int len, bool g2)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 1536:
		return g2 ? PKE_DH_G2_1536 : PKE_DH_1536;
	case 2048:
		return g2 ? PKE_DH_G2_2048 : PKE_DH_2048;
	case 3072:
		return g2 ? PKE_DH_G2_3072 : PKE_DH_3072;
	case 4096:
		return g2 ? PKE_DH_G2_4096 : PKE_DH_4096;
	default:
		return 0;
	}
}
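
/*
 * Build and send a PKE MODEXP request. The base is req->src when provided,
 * otherwise the stored generator g (or the implicit g = 2 service). Source
 * and destination are bounced into p_size-sized buffers whenever they are
 * not already contiguous and exactly p_size bytes long.
 */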
static int qat_dh_compute_value(struct kpp_request *req)
{
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(kpp_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	gfp_t flags = qat_algs_alloc_flags(&req->base);
	int n_input_params = 0;
	u8 *vaddr;
	int ret;

	if (unlikely(!ctx->xa))
		return -EINVAL;

	if (req->dst_len < ctx->p_size) {
		req->dst_len = ctx->p_size;
		return -EOVERFLOW;
	}

	if (req->src_len > ctx->p_size)
		return -EINVAL;

	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);

	msg->pke_hdr.cd_pars.func_id = qat_dh_fn_id(ctx->p_size,
						    !req->src && ctx->g2);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_dh_cb;
	qat_req->ctx.dh = ctx;
	qat_req->areq.dh = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	/*
	 * If no source is provided use g as base
	 */
	if (req->src) {
		qat_req->in.dh.in.xa = ctx->dma_xa;
		qat_req->in.dh.in.p = ctx->dma_p;
		n_input_params = 3;
	} else {
		if (ctx->g2) {
			qat_req->in.dh.in_g2.xa = ctx->dma_xa;
			qat_req->in.dh.in_g2.p = ctx->dma_p;
			n_input_params = 2;
		} else {
			qat_req->in.dh.in.b = ctx->dma_g;
			qat_req->in.dh.in.xa = ctx->dma_xa;
			qat_req->in.dh.in.p = ctx->dma_p;
			n_input_params = 3;
		}
	}

	ret = -ENOMEM;
	if (req->src) {
		/*
		 * src can be of any size in valid range, but HW expects it to
		 * be the same as modulo p so in case it is different we need
		 * to allocate a new buf and copy src data.
		 * In other case we just need to map the user provided buffer.
		 * Also need to make sure that it is in contiguous buffer.
		 */
		if (sg_is_last(req->src) && req->src_len == ctx->p_size) {
			qat_req->src_align = NULL;
			vaddr = sg_virt(req->src);
		} else {
			int shift = ctx->p_size - req->src_len;

			qat_req->src_align = kzalloc(ctx->p_size, flags);
			if (unlikely(!qat_req->src_align))
				return ret;

			scatterwalk_map_and_copy(qat_req->src_align + shift,
						 req->src, 0, req->src_len, 0);

			vaddr = qat_req->src_align;
		}

		qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size,
						     DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b)))
			goto unmap_src;
	}
	/*
	 * dst can be of any size in valid range, but HW expects it to be the
	 * same as modulo m so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) {
		qat_req->dst_align = NULL;
		vaddr = sg_virt(req->dst);
	} else {
		qat_req->dst_align = kzalloc(ctx->p_size, flags);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;

		vaddr = qat_req->dst_align;
	}
	qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size,
					   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r)))
		goto unmap_dst;

	qat_req->in.dh.in_tab[n_input_params] = 0;
	qat_req->out.dh.out_tab[1] = 0;
	/* Mapping in.in.b or in.in_g2.xa is the same */
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.dh,
					 sizeof(struct qat_dh_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.dh,
					  sizeof(struct qat_dh_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = n_input_params;
	msg->output_param_count = 1;

	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
	if (ret == -ENOSPC)
		goto unmap_all;

	return ret;

unmap_all:
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_dh_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_dh_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (!dma_mapping_error(dev, qat_req->out.dh.r))
		dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size,
				 DMA_FROM_DEVICE);
	kfree_sensitive(qat_req->dst_align);
unmap_src:
	if (req->src) {
		if (!dma_mapping_error(dev, qat_req->in.dh.in.b))
			dma_unmap_single(dev, qat_req->in.dh.in.b,
					 ctx->p_size, DMA_TO_DEVICE);
		kfree_sensitive(qat_req->src_align);
	}
	return ret;
}
static int qat_dh_generate_public_key(struct kpp_request *req)
{
	struct kpp_request *nreq = kpp_request_ctx(req);
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		kpp_request_set_tfm(nreq, ctx->ftfm);
		return crypto_kpp_generate_public_key(nreq);
	}

	return qat_dh_compute_value(req);
}
static int qat_dh_compute_shared_secret(struct kpp_request *req)
{
	struct kpp_request *nreq = kpp_request_ctx(req);
	struct crypto_kpp *tfm = crypto_kpp_reqtfm(req);
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		kpp_request_set_tfm(nreq, ctx->ftfm);
		return crypto_kpp_compute_shared_secret(nreq);
	}

	return qat_dh_compute_value(req);
}
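
/*
 * Only 1536, 2048, 3072 and 4096 bit groups are offloaded to the device;
 * anything else is served by the software fallback tfm.
 */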
static int qat_dh_check_params_length(unsigned int p_len)
{
	switch (p_len) {
	case 1536:
	case 2048:
	case 3072:
	case 4096:
		return 0;
	}
	return -EINVAL;
}
static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);

	ctx->p_size = params->p_size;
	ctx->p = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		return -ENOMEM;
	memcpy(ctx->p, params->p, ctx->p_size);

	/* If g equals 2 don't copy it */
	if (params->g_size == 1 && *(char *)params->g == 0x02) {
		ctx->g2 = true;
		return 0;
	}

	ctx->g = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_g, GFP_KERNEL);
	if (!ctx->g)
		return -ENOMEM;
	memcpy(ctx->g + (ctx->p_size - params->g_size), params->g,
	       params->g_size);

	return 0;
}
static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx)
{
	if (ctx->g) {
		memset(ctx->g, 0, ctx->p_size);
		dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g);
		ctx->g = NULL;
	}
	if (ctx->xa) {
		memset(ctx->xa, 0, ctx->p_size);
		dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa);
		ctx->xa = NULL;
	}
	if (ctx->p) {
		memset(ctx->p, 0, ctx->p_size);
		dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p);
		ctx->p = NULL;
	}
	ctx->p_size = 0;
	ctx->g2 = false;
}
static int qat_dh_set_secret(struct crypto_kpp *tfm, const void *buf,
			     unsigned int len)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct dh params;
	int ret;

	if (crypto_dh_decode_key(buf, len, &params) < 0)
		return -EINVAL;

	if (qat_dh_check_params_length(params.p_size << 3)) {
		ctx->fallback = true;
		return crypto_kpp_set_secret(ctx->ftfm, buf, len);
	}

	ctx->fallback = false;

	/* Free old secret if any */
	qat_dh_clear_ctx(dev, ctx);

	ret = qat_dh_set_params(ctx, &params);
	if (ret < 0)
		goto err_clear_ctx;

	ctx->xa = dma_alloc_coherent(dev, ctx->p_size, &ctx->dma_xa,
				     GFP_KERNEL);
	if (!ctx->xa) {
		ret = -ENOMEM;
		goto err_clear_ctx;
	}
	memcpy(ctx->xa + (ctx->p_size - params.key_size), params.key,
	       params.key_size);

	return 0;

err_clear_ctx:
	qat_dh_clear_ctx(dev, ctx);
	return ret;
}
static unsigned int qat_dh_max_size(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);

	if (ctx->fallback)
		return crypto_kpp_maxsize(ctx->ftfm);

	return ctx->p_size;
}
static int qat_dh_init_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(numa_node_id());
	const char *alg = kpp_alg_name(tfm);
	unsigned int reqsize;

	if (!inst)
		return -EINVAL;

	ctx->ftfm = crypto_alloc_kpp(alg, 0, CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->ftfm))
		return PTR_ERR(ctx->ftfm);

	crypto_kpp_set_flags(ctx->ftfm, crypto_kpp_get_flags(tfm));

	reqsize = max(sizeof(struct qat_asym_request) + 64,
		      sizeof(struct kpp_request) + crypto_kpp_reqsize(ctx->ftfm));

	kpp_set_reqsize(tfm, reqsize);

	ctx->p_size = 0;
	ctx->g2 = false;
	ctx->inst = inst;
	return 0;
}
static void qat_dh_exit_tfm(struct crypto_kpp *tfm)
{
	struct qat_dh_ctx *ctx = kpp_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	if (ctx->ftfm)
		crypto_free_kpp(ctx->ftfm);

	qat_dh_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}
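
/*
 * Completion handler for RSA requests, mirroring qat_dh_cb(): decode the
 * firmware status, release the DMA mappings taken in qat_rsa_enc() and
 * qat_rsa_dec(), copy out of the bounce buffer if one was used, and
 * complete the akcipher request.
 */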
static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
{
	struct qat_asym_request *req = (void *)(__force long)resp->opaque;
	struct akcipher_request *areq = req->areq.rsa;
	struct device *dev = &GET_DEV(req->ctx.rsa->inst->accel_dev);
	int err = ICP_QAT_FW_PKE_RESP_PKE_STAT_GET(
				resp->pke_resp_hdr.comn_resp_flags);

	err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;

	dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz,
			 DMA_TO_DEVICE);

	kfree_sensitive(req->src_align);

	areq->dst_len = req->ctx.rsa->key_sz;
	dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz,
			 DMA_FROM_DEVICE);
	if (req->dst_align) {
		scatterwalk_map_and_copy(req->dst_align, areq->dst, 0,
					 areq->dst_len, 1);
		kfree_sensitive(req->dst_align);
	}

	dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, req->phy_out,
			 sizeof(struct qat_rsa_output_params),
			 DMA_TO_DEVICE);

	akcipher_request_complete(areq, err);
}
void qat_alg_asym_callback(void *_resp)
{
	struct icp_qat_fw_pke_resp *resp = _resp;
	struct qat_asym_request *areq = (void *)(__force long)resp->opaque;
	struct qat_instance_backlog *backlog = areq->alg_req.backlog;

	areq->cb(resp);

	qat_alg_send_backlog(backlog);
}
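
/*
 * Firmware function IDs for RSA: EP for the public-key operation, DP1 for
 * the private-key operation and DP2 for the private-key operation in CRT
 * form, one per supported key size.
 */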
#define PKE_RSA_EP_512 0x1c161b21
#define PKE_RSA_EP_1024 0x35111bf7
#define PKE_RSA_EP_1536 0x4d111cdc
#define PKE_RSA_EP_2048 0x6e111dba
#define PKE_RSA_EP_3072 0x7d111ea3
#define PKE_RSA_EP_4096 0xa5101f7e
static unsigned long qat_rsa_enc_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_EP_512;
	case 1024:
		return PKE_RSA_EP_1024;
	case 1536:
		return PKE_RSA_EP_1536;
	case 2048:
		return PKE_RSA_EP_2048;
	case 3072:
		return PKE_RSA_EP_3072;
	case 4096:
		return PKE_RSA_EP_4096;
	default:
		return 0;
	}
}
#define PKE_RSA_DP1_512 0x1c161b3c
#define PKE_RSA_DP1_1024 0x35111c12
#define PKE_RSA_DP1_1536 0x4d111cf7
#define PKE_RSA_DP1_2048 0x6e111dda
#define PKE_RSA_DP1_3072 0x7d111ebe
#define PKE_RSA_DP1_4096 0xa5101f98
static unsigned long qat_rsa_dec_fn_id(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP1_512;
	case 1024:
		return PKE_RSA_DP1_1024;
	case 1536:
		return PKE_RSA_DP1_1536;
	case 2048:
		return PKE_RSA_DP1_2048;
	case 3072:
		return PKE_RSA_DP1_3072;
	case 4096:
		return PKE_RSA_DP1_4096;
	default:
		return 0;
	}
}
#define PKE_RSA_DP2_512 0x1c131b57
#define PKE_RSA_DP2_1024 0x26131c2d
#define PKE_RSA_DP2_1536 0x45111d12
#define PKE_RSA_DP2_2048 0x59121dfa
#define PKE_RSA_DP2_3072 0x81121ed9
#define PKE_RSA_DP2_4096 0xb1111fb2
static unsigned long qat_rsa_dec_fn_id_crt(unsigned int len)
{
	unsigned int bitslen = len << 3;

	switch (bitslen) {
	case 512:
		return PKE_RSA_DP2_512;
	case 1024:
		return PKE_RSA_DP2_1024;
	case 1536:
		return PKE_RSA_DP2_1536;
	case 2048:
		return PKE_RSA_DP2_2048;
	case 3072:
		return PKE_RSA_DP2_3072;
	case 4096:
		return PKE_RSA_DP2_4096;
	default:
		return 0;
	}
}
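
/* RSA public-key operation: c = m^e mod n, computed by the PKE firmware. */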
static int qat_rsa_enc(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	gfp_t flags = qat_algs_alloc_flags(&req->base);
	u8 *vaddr;
	int ret;

	if (unlikely(!ctx->n || !ctx->e))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}

	if (req->src_len > ctx->key_sz)
		return -EINVAL;

	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = qat_rsa_enc_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	qat_req->in.rsa.enc.e = ctx->dma_e;
	qat_req->in.rsa.enc.n = ctx->dma_n;
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		vaddr = sg_virt(req->src);
	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = kzalloc(ctx->key_sz, flags);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
		vaddr = qat_req->src_align;
	}

	qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz,
					       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m)))
		goto unmap_src;

	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		vaddr = sg_virt(req->dst);
	} else {
		qat_req->dst_align = kzalloc(ctx->key_sz, flags);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
		vaddr = qat_req->dst_align;
	}

	qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz,
						DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c)))
		goto unmap_dst;

	qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	msg->input_param_count = 3;
	msg->output_param_count = 1;

	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
	if (ret == -ENOSPC)
		goto unmap_all;

	return ret;

unmap_all:
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c))
		dma_unmap_single(dev, qat_req->out.rsa.enc.c,
				 ctx->key_sz, DMA_FROM_DEVICE);
	kfree_sensitive(qat_req->dst_align);
unmap_src:
	if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m))
		dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz,
				 DMA_TO_DEVICE);
	kfree_sensitive(qat_req->src_align);
	return ret;
}
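
/*
 * RSA private-key operation: m = c^d mod n, or the five-parameter CRT form
 * (p, q, dp, dq, qinv) when a full private key was provided.
 */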
static int qat_rsa_dec(struct akcipher_request *req)
{
	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_asym_request *qat_req =
			PTR_ALIGN(akcipher_request_ctx(req), 64);
	struct icp_qat_fw_pke_request *msg = &qat_req->req;
	gfp_t flags = qat_algs_alloc_flags(&req->base);
	u8 *vaddr;
	int ret;

	if (unlikely(!ctx->n || !ctx->d))
		return -EINVAL;

	if (req->dst_len < ctx->key_sz) {
		req->dst_len = ctx->key_sz;
		return -EOVERFLOW;
	}

	if (req->src_len > ctx->key_sz)
		return -EINVAL;

	memset(msg, '\0', sizeof(*msg));
	ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr,
					  ICP_QAT_FW_COMN_REQ_FLAG_SET);
	msg->pke_hdr.cd_pars.func_id = ctx->crt_mode ?
		qat_rsa_dec_fn_id_crt(ctx->key_sz) :
		qat_rsa_dec_fn_id(ctx->key_sz);
	if (unlikely(!msg->pke_hdr.cd_pars.func_id))
		return -EINVAL;

	qat_req->cb = qat_rsa_cb;
	qat_req->ctx.rsa = ctx;
	qat_req->areq.rsa = req;
	msg->pke_hdr.service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_PKE;
	msg->pke_hdr.comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_PTR_TYPE_FLAT,
					    QAT_COMN_CD_FLD_TYPE_64BIT_ADR);

	if (ctx->crt_mode) {
		qat_req->in.rsa.dec_crt.p = ctx->dma_p;
		qat_req->in.rsa.dec_crt.q = ctx->dma_q;
		qat_req->in.rsa.dec_crt.dp = ctx->dma_dp;
		qat_req->in.rsa.dec_crt.dq = ctx->dma_dq;
		qat_req->in.rsa.dec_crt.qinv = ctx->dma_qinv;
	} else {
		qat_req->in.rsa.dec.d = ctx->dma_d;
		qat_req->in.rsa.dec.n = ctx->dma_n;
	}
	ret = -ENOMEM;

	/*
	 * src can be of any size in valid range, but HW expects it to be the
	 * same as modulo n so in case it is different we need to allocate a
	 * new buf and copy src data.
	 * In other case we just need to map the user provided buffer.
	 * Also need to make sure that it is in contiguous buffer.
	 */
	if (sg_is_last(req->src) && req->src_len == ctx->key_sz) {
		qat_req->src_align = NULL;
		vaddr = sg_virt(req->src);
	} else {
		int shift = ctx->key_sz - req->src_len;

		qat_req->src_align = kzalloc(ctx->key_sz, flags);
		if (unlikely(!qat_req->src_align))
			return ret;

		scatterwalk_map_and_copy(qat_req->src_align + shift, req->src,
					 0, req->src_len, 0);
		vaddr = qat_req->src_align;
	}

	qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz,
					       DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c)))
		goto unmap_src;

	if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) {
		qat_req->dst_align = NULL;
		vaddr = sg_virt(req->dst);
	} else {
		qat_req->dst_align = kzalloc(ctx->key_sz, flags);
		if (unlikely(!qat_req->dst_align))
			goto unmap_src;
		vaddr = qat_req->dst_align;
	}
	qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz,
						DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m)))
		goto unmap_dst;

	if (ctx->crt_mode)
		qat_req->in.rsa.in_tab[6] = 0;
	else
		qat_req->in.rsa.in_tab[3] = 0;
	qat_req->out.rsa.out_tab[1] = 0;
	qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa,
					 sizeof(struct qat_rsa_input_params),
					 DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_in)))
		goto unmap_dst;

	qat_req->phy_out = dma_map_single(dev, &qat_req->out.rsa,
					  sizeof(struct qat_rsa_output_params),
					  DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, qat_req->phy_out)))
		goto unmap_in_params;

	msg->pke_mid.src_data_addr = qat_req->phy_in;
	msg->pke_mid.dest_data_addr = qat_req->phy_out;
	msg->pke_mid.opaque = (u64)(__force long)qat_req;
	if (ctx->crt_mode)
		msg->input_param_count = 6;
	else
		msg->input_param_count = 3;

	msg->output_param_count = 1;

	ret = qat_alg_send_asym_message(qat_req, inst, &req->base);
	if (ret == -ENOSPC)
		goto unmap_all;

	return ret;

unmap_all:
	if (!dma_mapping_error(dev, qat_req->phy_out))
		dma_unmap_single(dev, qat_req->phy_out,
				 sizeof(struct qat_rsa_output_params),
				 DMA_TO_DEVICE);
unmap_in_params:
	if (!dma_mapping_error(dev, qat_req->phy_in))
		dma_unmap_single(dev, qat_req->phy_in,
				 sizeof(struct qat_rsa_input_params),
				 DMA_TO_DEVICE);
unmap_dst:
	if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m))
		dma_unmap_single(dev, qat_req->out.rsa.dec.m,
				 ctx->key_sz, DMA_FROM_DEVICE);
	kfree_sensitive(qat_req->dst_align);
unmap_src:
	if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c))
		dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz,
				 DMA_TO_DEVICE);
	kfree_sensitive(qat_req->src_align);
	return ret;
}
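
/*
 * Key material setters. Leading zero bytes are stripped first; n fixes the
 * key size (and hence the firmware function ID), while e and d are right
 * aligned into key_sz-sized DMA-coherent buffers, since the firmware
 * expects fixed-width operands.
 */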
static int qat_rsa_set_n(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ctx->key_sz = vlen;
	ret = -EINVAL;
	/* invalid key size provided */
	if (!qat_rsa_enc_fn_id(ctx->key_sz))
		goto err;

	ret = -ENOMEM;
	ctx->n = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_n, GFP_KERNEL);
	if (!ctx->n)
		goto err;

	memcpy(ctx->n, ptr, ctx->key_sz);
	return 0;
err:
	ctx->key_sz = 0;
	ctx->n = NULL;
	return ret;
}
static int qat_rsa_set_e(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz) {
		ctx->e = NULL;
		return -EINVAL;
	}

	ctx->e = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_e, GFP_KERNEL);
	if (!ctx->e)
		return -ENOMEM;

	memcpy(ctx->e + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
}
static int qat_rsa_set_d(struct qat_rsa_ctx *ctx, const char *value,
			 size_t vlen)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr = value;
	int ret;

	while (!*ptr && vlen) {
		ptr++;
		vlen--;
	}

	ret = -EINVAL;
	if (!ctx->key_sz || !vlen || vlen > ctx->key_sz)
		goto err;

	ret = -ENOMEM;
	ctx->d = dma_alloc_coherent(dev, ctx->key_sz, &ctx->dma_d, GFP_KERNEL);
	if (!ctx->d)
		goto err;

	memcpy(ctx->d + (ctx->key_sz - vlen), ptr, vlen);
	return 0;
err:
	ctx->d = NULL;
	return ret;
}
static void qat_rsa_drop_leading_zeros(const char **ptr, unsigned int *len)
{
	while (!**ptr && *len) {
		(*ptr)++;
		(*len)--;
	}
}
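
/*
 * Best-effort CRT setup: each component is half the key size. If any
 * length check or allocation fails, everything set so far is zeroed and
 * freed and the context falls back to non-CRT (d, n) decryption.
 */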
static void qat_rsa_setkey_crt(struct qat_rsa_ctx *ctx, struct rsa_key *rsa_key)
{
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev = &GET_DEV(inst->accel_dev);
	const char *ptr;
	unsigned int len;
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* p */
	ptr = rsa_key->p;
	len = rsa_key->p_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto err;
	ctx->p = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_p, GFP_KERNEL);
	if (!ctx->p)
		goto err;
	memcpy(ctx->p + (half_key_sz - len), ptr, len);

	/* q */
	ptr = rsa_key->q;
	len = rsa_key->q_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_p;
	ctx->q = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_q, GFP_KERNEL);
	if (!ctx->q)
		goto free_p;
	memcpy(ctx->q + (half_key_sz - len), ptr, len);

	/* dp */
	ptr = rsa_key->dp;
	len = rsa_key->dp_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_q;
	ctx->dp = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dp,
				     GFP_KERNEL);
	if (!ctx->dp)
		goto free_q;
	memcpy(ctx->dp + (half_key_sz - len), ptr, len);

	/* dq */
	ptr = rsa_key->dq;
	len = rsa_key->dq_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dp;
	ctx->dq = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_dq,
				     GFP_KERNEL);
	if (!ctx->dq)
		goto free_dp;
	memcpy(ctx->dq + (half_key_sz - len), ptr, len);

	/* qinv */
	ptr = rsa_key->qinv;
	len = rsa_key->qinv_sz;
	qat_rsa_drop_leading_zeros(&ptr, &len);
	if (!len)
		goto free_dq;
	ctx->qinv = dma_alloc_coherent(dev, half_key_sz, &ctx->dma_qinv,
				       GFP_KERNEL);
	if (!ctx->qinv)
		goto free_dq;
	memcpy(ctx->qinv + (half_key_sz - len), ptr, len);

	ctx->crt_mode = true;
	return;

free_dq:
	memset(ctx->dq, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	ctx->dq = NULL;
free_dp:
	memset(ctx->dp, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	ctx->dp = NULL;
free_q:
	memset(ctx->q, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	ctx->q = NULL;
free_p:
	memset(ctx->p, '\0', half_key_sz);
	dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	ctx->p = NULL;
err:
	ctx->crt_mode = false;
}
static void qat_rsa_clear_ctx(struct device *dev, struct qat_rsa_ctx *ctx)
{
	unsigned int half_key_sz = ctx->key_sz / 2;

	/* Free the old key if any */
	if (ctx->n)
		dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n);
	if (ctx->e)
		dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e);
	if (ctx->d) {
		memset(ctx->d, '\0', ctx->key_sz);
		dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d);
	}
	if (ctx->p) {
		memset(ctx->p, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->p, ctx->dma_p);
	}
	if (ctx->q) {
		memset(ctx->q, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->q, ctx->dma_q);
	}
	if (ctx->dp) {
		memset(ctx->dp, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dp, ctx->dma_dp);
	}
	if (ctx->dq) {
		memset(ctx->dq, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->dq, ctx->dma_dq);
	}
	if (ctx->qinv) {
		memset(ctx->qinv, '\0', half_key_sz);
		dma_free_coherent(dev, half_key_sz, ctx->qinv, ctx->dma_qinv);
	}

	ctx->n = NULL;
	ctx->e = NULL;
	ctx->d = NULL;
	ctx->p = NULL;
	ctx->q = NULL;
	ctx->dp = NULL;
	ctx->dq = NULL;
	ctx->qinv = NULL;
	ctx->crt_mode = false;
	ctx->key_sz = 0;
}
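
/*
 * Common setkey path for public and private keys: parse the key with the
 * generic RSA helpers, then populate n, e and, for private keys, d plus
 * the optional CRT components.
 */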
static int qat_rsa_setkey(struct crypto_akcipher *tfm, const void *key,
			  unsigned int keylen, bool private)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	struct rsa_key rsa_key;
	int ret;

	qat_rsa_clear_ctx(dev, ctx);

	if (private)
		ret = rsa_parse_priv_key(&rsa_key, key, keylen);
	else
		ret = rsa_parse_pub_key(&rsa_key, key, keylen);
	if (ret < 0)
		goto free;

	ret = qat_rsa_set_n(ctx, rsa_key.n, rsa_key.n_sz);
	if (ret < 0)
		goto free;
	ret = qat_rsa_set_e(ctx, rsa_key.e, rsa_key.e_sz);
	if (ret < 0)
		goto free;
	if (private) {
		ret = qat_rsa_set_d(ctx, rsa_key.d, rsa_key.d_sz);
		if (ret < 0)
			goto free;
		qat_rsa_setkey_crt(ctx, &rsa_key);
	}

	if (!ctx->n || !ctx->e) {
		/* invalid key provided */
		ret = -EINVAL;
		goto free;
	}
	if (private && !ctx->d) {
		/* invalid private key provided */
		ret = -EINVAL;
		goto free;
	}

	return 0;
free:
	qat_rsa_clear_ctx(dev, ctx);
	return ret;
}
static int qat_rsa_setpubkey(struct crypto_akcipher *tfm, const void *key,
			     unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, false);
}
static int qat_rsa_setprivkey(struct crypto_akcipher *tfm, const void *key,
			      unsigned int keylen)
{
	return qat_rsa_setkey(tfm, key, keylen, true);
}
static unsigned int qat_rsa_max_size(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);

	return ctx->key_sz;
}
static int qat_rsa_init_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct qat_crypto_instance *inst =
			qat_crypto_get_instance_node(numa_node_id());

	if (!inst)
		return -EINVAL;

	akcipher_set_reqsize(tfm, sizeof(struct qat_asym_request) + 64);

	ctx->key_sz = 0;
	ctx->inst = inst;
	return 0;
}
static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm)
{
	struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm);
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	qat_rsa_clear_ctx(dev, ctx);
	qat_crypto_put_instance(ctx->inst);
}
static struct akcipher_alg rsa = {
	.encrypt = qat_rsa_enc,
	.decrypt = qat_rsa_dec,
	.set_pub_key = qat_rsa_setpubkey,
	.set_priv_key = qat_rsa_setprivkey,
	.max_size = qat_rsa_max_size,
	.init = qat_rsa_init_tfm,
	.exit = qat_rsa_exit_tfm,
	.base = {
		.cra_name = "rsa",
		.cra_driver_name = "qat-rsa",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_rsa_ctx),
	},
};
static struct kpp_alg dh = {
	.set_secret = qat_dh_set_secret,
	.generate_public_key = qat_dh_generate_public_key,
	.compute_shared_secret = qat_dh_compute_shared_secret,
	.max_size = qat_dh_max_size,
	.init = qat_dh_init_tfm,
	.exit = qat_dh_exit_tfm,
	.base = {
		.cra_name = "dh",
		.cra_driver_name = "qat-dh",
		.cra_priority = 1000,
		.cra_module = THIS_MODULE,
		.cra_ctxsize = sizeof(struct qat_dh_ctx),
		.cra_flags = CRYPTO_ALG_NEED_FALLBACK,
	},
};
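
/*
 * Illustrative use (not part of this driver): once registered, these
 * implementations are reached through the generic crypto API, e.g.
 *
 *	struct crypto_akcipher *tfm = crypto_alloc_akcipher("rsa", 0, 0);
 *
 * which selects "qat-rsa" when its priority (1000) wins; "dh" requests
 * for unsupported group sizes are served transparently by the software
 * fallback tfm.
 */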
int qat_asym_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		rsa.base.cra_flags = 0;
		ret = crypto_register_akcipher(&rsa);
		if (ret)
			goto unlock;
		ret = crypto_register_kpp(&dh);
	}
unlock:
	mutex_unlock(&algs_lock);
	return ret;
}
void qat_asym_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0) {
		crypto_unregister_akcipher(&rsa);
		crypto_unregister_kpp(&dh);
	}
	mutex_unlock(&algs_lock);
}