// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 */

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>

#include <crypto/internal/aead.h>
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>

#include "safexcel.h"
static u32 max_rings = EIP197_MAX_RINGS;
module_param(max_rings, uint, 0644);
MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
static void eip197_trc_cache_init(struct safexcel_crypto_priv *priv)
{
	u32 val, htable_offset;
	int i, cs_rc_max, cs_ht_wc, cs_trc_rec_wc, cs_trc_lg_rec_wc;

	if (priv->version == EIP197B) {
		cs_rc_max = EIP197B_CS_RC_MAX;
		cs_ht_wc = EIP197B_CS_HT_WC;
		cs_trc_rec_wc = EIP197B_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197B_CS_TRC_LG_REC_WC;
	} else {
		cs_rc_max = EIP197D_CS_RC_MAX;
		cs_ht_wc = EIP197D_CS_HT_WC;
		cs_trc_rec_wc = EIP197D_CS_TRC_REC_WC;
		cs_trc_lg_rec_wc = EIP197D_CS_TRC_LG_REC_WC;
	}
	/* Enable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	val |= EIP197_TRC_ENABLE_0;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);

	/* Clear all ECC errors */
	writel(0, priv->base + EIP197_TRC_ECCCTRL);
	/*
	 * Make sure the cache memory is accessible by taking the record cache
	 * into reset.
	 */
	val = readl(priv->base + EIP197_TRC_PARAMS);
	val |= EIP197_TRC_PARAMS_SW_RESET;
	val &= ~EIP197_TRC_PARAMS_DATA_ACCESS;
	writel(val, priv->base + EIP197_TRC_PARAMS);
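	/*
	 * The loop below strings all cache records into a doubly-linked free
	 * list: the second word of each record holds next/prev indices, with
	 * EIP197_RC_NULL terminating the list at both ends.
	 */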
	/* Clear all records */
	for (i = 0; i < cs_rc_max; i++) {
		u32 val, offset = EIP197_CLASSIFICATION_RAMS + i * EIP197_CS_RC_SIZE;

		writel(EIP197_CS_RC_NEXT(EIP197_RC_NULL) |
		       EIP197_CS_RC_PREV(EIP197_RC_NULL),
		       priv->base + offset);

		val = EIP197_CS_RC_NEXT(i + 1) | EIP197_CS_RC_PREV(i - 1);
		if (i == 0)
			val |= EIP197_CS_RC_PREV(EIP197_RC_NULL);
		else if (i == cs_rc_max - 1)
			val |= EIP197_CS_RC_NEXT(EIP197_RC_NULL);
		writel(val, priv->base + offset + sizeof(u32));
	}
	/* Clear the hash table entries */
	htable_offset = cs_rc_max * EIP197_CS_RC_SIZE;
	for (i = 0; i < cs_ht_wc; i++)
		writel(GENMASK(29, 0),
		       priv->base + EIP197_CLASSIFICATION_RAMS + htable_offset + i * sizeof(u32));
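	/*
	 * All-ones in the 30 valid bits of each hash table word appears to
	 * mark the bucket as empty, so lookups miss until context records are
	 * actually cached.
	 */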
	/* Disable the record cache memory access */
	val = readl(priv->base + EIP197_CS_RAM_CTRL);
	val &= ~EIP197_TRC_ENABLE_MASK;
	writel(val, priv->base + EIP197_CS_RAM_CTRL);
	/* Write head and tail pointers of the record free chain */
	val = EIP197_TRC_FREECHAIN_HEAD_PTR(0) |
	      EIP197_TRC_FREECHAIN_TAIL_PTR(cs_rc_max - 1);
	writel(val, priv->base + EIP197_TRC_FREECHAIN);

	/* Configure the record cache #1 */
	val = EIP197_TRC_PARAMS2_RC_SZ_SMALL(cs_trc_rec_wc) |
	      EIP197_TRC_PARAMS2_HTABLE_PTR(cs_rc_max);
	writel(val, priv->base + EIP197_TRC_PARAMS2);

	/* Configure the record cache #2 */
	val = EIP197_TRC_PARAMS_RC_SZ_LARGE(cs_trc_lg_rec_wc) |
	      EIP197_TRC_PARAMS_BLK_TIMER_SPEED(1) |
	      EIP197_TRC_PARAMS_HTABLE_SZ(2);
	writel(val, priv->base + EIP197_TRC_PARAMS);
}
static void eip197_write_firmware(struct safexcel_crypto_priv *priv,
				  const struct firmware *fw, int pe, u32 ctrl,
				  u32 prog_en)
{
	const u32 *data = (const u32 *)fw->data;
	u32 val;
	int i;

	/* Reset the engine to make its program memory accessible */
	writel(EIP197_PE_ICE_x_CTRL_SW_RESET |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_CORR |
	       EIP197_PE_ICE_x_CTRL_CLR_ECC_NON_CORR,
	       EIP197_PE(priv) + ctrl);
	/* Enable access to the program memory */
	writel(prog_en, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Write the firmware */
	for (i = 0; i < fw->size / sizeof(u32); i++)
		writel(be32_to_cpu(data[i]),
		       priv->base + EIP197_CLASSIFICATION_RAMS + i * sizeof(u32));
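	/*
	 * The firmware images are streams of big-endian 32-bit words;
	 * be32_to_cpu() converts each word to host order before writel()
	 * emits it to the classification RAM.
	 */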
	/* Disable access to the program memory */
	writel(0, EIP197_PE(priv) + EIP197_PE_ICE_RAM_CTRL(pe));

	/* Release engine from reset */
	val = readl(EIP197_PE(priv) + ctrl);
	val &= ~EIP197_PE_ICE_x_CTRL_SW_RESET;
	writel(val, EIP197_PE(priv) + ctrl);
}
static int eip197_load_firmwares(struct safexcel_crypto_priv *priv)
{
	const char *fw_name[] = {"ifpp.bin", "ipue.bin"};
	const struct firmware *fw[FW_NB];
	char fw_path[31], *dir = NULL;
	int i, j, ret = 0, pe;
	u32 val;
	switch (priv->version) {
	case EIP197B:
		dir = "eip197b";
		break;
	case EIP197D:
		dir = "eip197d";
		break;
	default:
		/* No firmware is required */
		return 0;
	}
	for (i = 0; i < FW_NB; i++) {
		snprintf(fw_path, 31, "inside-secure/%s/%s", dir, fw_name[i]);
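		/*
		 * request_firmware() resolves this relative path against the
		 * standard firmware locations, e.g.
		 * /lib/firmware/inside-secure/<dir>/<name>.
		 */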
		ret = request_firmware(&fw[i], fw_path, priv->dev);
		if (ret) {
			if (priv->version != EIP197B)
				goto release_fw;

			/* Fallback to the old firmware location for the
			 * EIP197b.
			 */
			ret = request_firmware(&fw[i], fw_name[i], priv->dev);
			if (ret) {
				dev_err(priv->dev,
					"Failed to request firmware %s (%d)\n",
					fw_name[i], ret);
				goto release_fw;
			}
		}
	}
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Clear the scratchpad memory */
		val = readl(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));
		val |= EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_TIMER |
		       EIP197_PE_ICE_SCRATCH_CTRL_TIMER_EN |
		       EIP197_PE_ICE_SCRATCH_CTRL_SCRATCH_ACCESS |
		       EIP197_PE_ICE_SCRATCH_CTRL_CHANGE_ACCESS;
		writel(val, EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_CTRL(pe));

		memset_io(EIP197_PE(priv) + EIP197_PE_ICE_SCRATCH_RAM(pe), 0,
			  EIP197_NUM_OF_SCRATCH_BLOCKS * sizeof(u32));

		eip197_write_firmware(priv, fw[FW_IFPP], pe,
				      EIP197_PE_ICE_FPP_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_FPP_PROG_EN);

		eip197_write_firmware(priv, fw[FW_IPUE], pe,
				      EIP197_PE_ICE_PUE_CTRL(pe),
				      EIP197_PE_ICE_RAM_CTRL_PUE_PROG_EN);
	}
release_fw:
	for (j = 0; j < i; j++)
		release_firmware(fw[j]);

	return ret;
}
static int safexcel_hw_setup_cdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, cd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;
	cd_size_rnd = (priv->config.cd_size + (BIT(hdw) - 1)) >> hdw;
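	/*
	 * hdw is the log2 of the host interface data width in 32-bit words;
	 * cd_size_rnd is the command descriptor size rounded up to that
	 * width, expressed in units of 2^hdw words.
	 */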
	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].cdr.base_dma),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.cd_offset << 16) |
		       priv->config.cd_size,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);
		writel(((EIP197_FETCH_COUNT * (cd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.cd_offset),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);
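		/*
		 * From the register layout here (an assumption, not verified
		 * against the datasheet): the upper half of xDR_CFG holds the
		 * fetch threshold and the lower half the fetch size, both in
		 * 32-bit words, sized for EIP197_FETCH_COUNT descriptors.
		 */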
		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);

		/* clear any pending interrupt */
		writel(GENMASK(5, 0),
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
	}

	return 0;
}
static int safexcel_hw_setup_rdesc_rings(struct safexcel_crypto_priv *priv)
{
	u32 hdw, rd_size_rnd, val;
	int i;

	hdw = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	hdw &= GENMASK(27, 25);
	hdw >>= 25;

	rd_size_rnd = (priv->config.rd_size + (BIT(hdw) - 1)) >> hdw;

	for (i = 0; i < priv->config.rings; i++) {
		/* ring base address */
		writel(lower_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(upper_32_bits(priv->ring[i].rdr.base_dma),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		writel(EIP197_xDR_DESC_MODE_64BIT | (priv->config.rd_offset << 16) |
		       priv->config.rd_size,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DESC_SIZE);

		writel(((EIP197_FETCH_COUNT * (rd_size_rnd << hdw)) << 16) |
		       (EIP197_FETCH_COUNT * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Configure DMA tx control */
		val = EIP197_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS);
		val |= EIP197_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS);
		val |= EIP197_HIA_xDR_WR_RES_BUF | EIP197_HIA_xDR_WR_CTRL_BUF;
		writel(val,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_DMA_CFG);
		/* clear any pending interrupt */
		writel(GENMASK(7, 0),
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* enable ring interrupt */
		val = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
		val |= EIP197_RDR_IRQ(i);
		writel(val, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CTRL(i));
	}

	return 0;
}
static int safexcel_hw_init(struct safexcel_crypto_priv *priv)
{
	u32 version, val;
	int i, ret, pe;

	/* Determine endianness and configure byte swap */
	version = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_VERSION);
	val = readl(EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);
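	/*
	 * The version register holds a fixed signature; whether it reads back
	 * in the low or high 16 bits tells us how the host and engine
	 * currently disagree on byte order, and thus which byte-swap setting
	 * to program below.
	 */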
	if ((version & 0xffff) == EIP197_HIA_VERSION_BE)
		val |= EIP197_MST_CTRL_BYTE_SWAP;
	else if (((version >> 16) & 0xffff) == EIP197_HIA_VERSION_LE)
		val |= (EIP197_MST_CTRL_NO_BYTE_SWAP >> 24);
	/* For EIP197 set maximum number of TX commands to 2^5 = 32 */
	if (priv->version == EIP197B || priv->version == EIP197D)
		val |= EIP197_MST_CTRL_TX_MAX_CMD(5);

	writel(val, EIP197_HIA_AIC(priv) + EIP197_HIA_MST_CTRL);

	/* Configure wr/rd cache values */
	writel(EIP197_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
	       EIP197_MST_CTRL_WD_CACHE(WR_CACHE_4BITS),
	       EIP197_HIA_GEN_CFG(priv) + EIP197_MST_CTRL);
	/* Interrupts reset */

	/* Disable all global interrupts */
	writel(0, EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ENABLE_CTRL);

	/* Clear any pending interrupt */
	writel(GENMASK(31, 0), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	/* Processing Engine configuration */
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Data Fetch Engine configuration */

		/* Reset all DFE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* Reset HIA input interface arbiter */
			writel(EIP197_HIA_RA_PE_CTRL_RESET,
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}
		/* DMA transfer size to use */
		val = EIP197_HIA_DFE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(9);
		val |= EIP197_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
		       EIP197_HIA_DxE_CFG_MAX_CTRL_SIZE(7);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS);
		val |= EIP197_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS);
		writel(val, EIP197_HIA_DFE(priv) + EIP197_HIA_DFE_CFG(pe));

		/* Take the DFE threads out of reset */
		writel(0, EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));
		/* Configure the processing engine thresholds */
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(9),
		       EIP197_PE(priv) + EIP197_PE_IN_DBUF_THRES(pe));
		writel(EIP197_PE_IN_xBUF_THRES_MIN(6) |
		       EIP197_PE_IN_xBUF_THRES_MAX(7),
		       EIP197_PE(priv) + EIP197_PE_IN_TBUF_THRES(pe));

		if (priv->version == EIP197B || priv->version == EIP197D) {
			/* enable HIA input interface arbiter and rings */
			writel(EIP197_HIA_RA_PE_CTRL_EN |
			       GENMASK(priv->config.rings - 1, 0),
			       EIP197_HIA_AIC(priv) + EIP197_HIA_RA_PE_CTRL(pe));
		}
		/* Data Store Engine configuration */

		/* Reset all DSE threads */
		writel(EIP197_DxE_THR_CTRL_RESET_PE,
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));

		/* Wait for all DSE threads to complete */
		while ((readl(EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_STAT(pe)) &
			GENMASK(15, 12)) != GENMASK(15, 12))
			;
		/* DMA transfer size to use */
		val = EIP197_HIA_DSE_CFG_DIS_DEBUG;
		val |= EIP197_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
		       EIP197_HIA_DxE_CFG_MAX_DATA_SIZE(8);
		val |= EIP197_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS);
		val |= EIP197_HIA_DSE_CFG_ALWAYS_BUFFERABLE;
		/* FIXME: instability issues can occur for EIP97, but disabling
		 * this impacts performance.
		 */
		if (priv->version == EIP197B || priv->version == EIP197D)
			val |= EIP197_HIA_DSE_CFG_EN_SINGLE_WR;
		writel(val, EIP197_HIA_DSE(priv) + EIP197_HIA_DSE_CFG(pe));

		/* Take the DSE threads out of reset */
		writel(0, EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
		/* Configure the processing engine thresholds */
		writel(EIP197_PE_OUT_DBUF_THRES_MIN(7) |
		       EIP197_PE_OUT_DBUF_THRES_MAX(8),
		       EIP197_PE(priv) + EIP197_PE_OUT_DBUF_THRES(pe));
		/* Processing Engine configuration */

		/* H/W capabilities selection */
		val = EIP197_FUNCTION_RSVD;
		val |= EIP197_PROTOCOL_ENCRYPT_ONLY | EIP197_PROTOCOL_HASH_ONLY;
		val |= EIP197_PROTOCOL_ENCRYPT_HASH | EIP197_PROTOCOL_HASH_DECRYPT;
		val |= EIP197_ALG_DES_ECB | EIP197_ALG_DES_CBC;
		val |= EIP197_ALG_3DES_ECB | EIP197_ALG_3DES_CBC;
		val |= EIP197_ALG_AES_ECB | EIP197_ALG_AES_CBC;
		val |= EIP197_ALG_MD5 | EIP197_ALG_HMAC_MD5;
		val |= EIP197_ALG_SHA1 | EIP197_ALG_HMAC_SHA1;
		val |= EIP197_ALG_SHA2 | EIP197_ALG_HMAC_SHA2;
		writel(val, EIP197_PE(priv) + EIP197_PE_EIP96_FUNCTION_EN(pe));
	}
	/* Command Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Clear interrupts for this ring */
		writel(GENMASK(31, 0),
		       EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLE_CLR(i));

		/* Disable external triggering */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);
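		/*
		 * Ring size: cd_offset is the per-descriptor stride in 32-bit
		 * words, so the value programmed here is entries * stride in
		 * bytes (the << 2 converts words to bytes).
		 */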
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.cd_offset) << 2,
		       EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}
	/* Result Descriptor Rings prepare */
	for (i = 0; i < priv->config.rings; i++) {
		/* Disable external triggering */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_CFG);

		/* Clear the pending prepared counter */
		writel(EIP197_xDR_PREP_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_COUNT);

		/* Clear the pending processed counter */
		writel(EIP197_xDR_PROC_CLR_COUNT,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_COUNT);

		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PREP_PNTR);
		writel(0,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_PROC_PNTR);

		/* Ring size */
		writel((EIP197_DEFAULT_RING_SIZE * priv->config.rd_offset) << 2,
		       EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_SIZE);
	}
	for (pe = 0; pe < priv->config.pes; pe++) {
		/* Enable command descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DFE_THR(priv) + EIP197_HIA_DFE_THR_CTRL(pe));

		/* Enable result descriptor rings */
		writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
		       EIP197_HIA_DSE_THR(priv) + EIP197_HIA_DSE_THR_CTRL(pe));
	}
	/* Clear any HIA interrupt */
	writel(GENMASK(30, 20), EIP197_HIA_AIC_G(priv) + EIP197_HIA_AIC_G_ACK);

	if (priv->version == EIP197B || priv->version == EIP197D) {
		eip197_trc_cache_init(priv);

		ret = eip197_load_firmwares(priv);
		if (ret)
			return ret;
	}

	safexcel_hw_setup_cdesc_rings(priv);
	safexcel_hw_setup_rdesc_rings(priv);

	return 0;
}
/* Called with ring's lock taken */
static void safexcel_try_push_requests(struct safexcel_crypto_priv *priv,
				       int ring)
{
	int coal = min_t(int, priv->ring[ring].requests, EIP197_MAX_BATCH_SZ);

	if (!coal)
		return;
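	/*
	 * Interrupt coalescing: the threshold register below makes the RDR
	 * raise a single interrupt once 'coal' packets (at most
	 * EIP197_MAX_BATCH_SZ) have been processed, rather than one
	 * interrupt per request.
	 */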
	/* Configure when we want an interrupt */
	writel(EIP197_HIA_RDR_THRESH_PKT_MODE |
	       EIP197_HIA_RDR_THRESH_PROC_PKT(coal),
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_THRESH);
}
void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring)
{
	struct crypto_async_request *req, *backlog;
	struct safexcel_context *ctx;
	int ret, nreq = 0, cdesc = 0, rdesc = 0, commands, results;

	/* If a request wasn't properly dequeued because of a lack of
	 * resources, process it first.
	 */
	req = priv->ring[ring].req;
	backlog = priv->ring[ring].backlog;
	if (req)
		goto handle_req;

	while (true) {
		spin_lock_bh(&priv->ring[ring].queue_lock);
		backlog = crypto_get_backlog(&priv->ring[ring].queue);
		req = crypto_dequeue_request(&priv->ring[ring].queue);
		spin_unlock_bh(&priv->ring[ring].queue_lock);

		if (!req) {
			priv->ring[ring].req = NULL;
			priv->ring[ring].backlog = NULL;
			goto finalize;
		}
handle_req:
		ctx = crypto_tfm_ctx(req->tfm);
		ret = ctx->send(req, ring, &commands, &results);
		if (ret)
			goto request_failed;

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

		/* In case the send() helper did not issue any command to push
		 * to the engine because the input data was cached, continue to
		 * dequeue other requests as this is valid and not an error.
		 */
		if (!commands && !results)
			continue;

		cdesc += commands;
		rdesc += results;
		nreq++;
	}
request_failed:
	/* Not enough resources to handle all the requests. Bail out and save
	 * the request and the backlog for the next dequeue call (per-ring).
	 */
	priv->ring[ring].req = req;
	priv->ring[ring].backlog = backlog;

finalize:
	if (!nreq)
		return;
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests += nreq;

	if (!priv->ring[ring].busy) {
		safexcel_try_push_requests(priv, ring);
		priv->ring[ring].busy = true;
	}

	spin_unlock_bh(&priv->ring[ring].lock);
	/* let the RDR know we have pending descriptors */
	writel((rdesc * priv->config.rd_offset) << 2,
	       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);

	/* let the CDR know we have pending descriptors */
	writel((cdesc * priv->config.cd_offset) << 2,
	       EIP197_HIA_CDR(priv, ring) + EIP197_HIA_xDR_PREP_COUNT);
}
inline int safexcel_rdesc_check_errors(struct safexcel_crypto_priv *priv,
				       struct safexcel_result_desc *rdesc)
{
	if (likely(!rdesc->result_data.error_code))
		return 0;

	if (rdesc->result_data.error_code & 0x407f) {
		/* Fatal error (bits 0-7, 14) */
		dev_err(priv->dev,
			"cipher: result: result descriptor error (%d)\n",
			rdesc->result_data.error_code);
		return -EIO;
	} else if (rdesc->result_data.error_code == BIT(9)) {
		/* Authentication failed */
		return -EBADMSG;
	}

	/* All other non-fatal errors */
	return -EINVAL;
}
inline void safexcel_rdr_req_set(struct safexcel_crypto_priv *priv,
				 int ring,
				 struct safexcel_result_desc *rdesc,
				 struct crypto_async_request *req)
{
	int i = safexcel_ring_rdr_rdesc_index(priv, ring, rdesc);

	priv->ring[ring].rdr_req[i] = req;
}
inline struct crypto_async_request *
safexcel_rdr_req_get(struct safexcel_crypto_priv *priv, int ring)
{
	int i = safexcel_ring_first_rdr_index(priv, ring);

	return priv->ring[ring].rdr_req[i];
}
void safexcel_complete(struct safexcel_crypto_priv *priv, int ring)
{
	struct safexcel_command_desc *cdesc;

	/* Acknowledge the command descriptors */
	do {
		cdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].cdr);
		if (IS_ERR(cdesc)) {
			dev_err(priv->dev,
				"Could not retrieve the command descriptor\n");
			break;
		}
	} while (!cdesc->last_seg);
}
void safexcel_inv_complete(struct crypto_async_request *req, int error)
{
	struct safexcel_inv_result *result = req->data;

	if (error == -EINPROGRESS)
		return;

	result->error = error;
	complete(&result->completion);
}
int safexcel_invalidate_cache(struct crypto_async_request *async,
			      struct safexcel_crypto_priv *priv,
			      dma_addr_t ctxr_dma, int ring)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	int ret = 0;

	/* Prepare command descriptor */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, 0, 0, 0, ctxr_dma);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	cdesc->control_data.type = EIP197_TYPE_EXTENDED;
	cdesc->control_data.options = 0;
	cdesc->control_data.refresh = 0;
	cdesc->control_data.control0 = CONTEXT_CONTROL_INV_TR;

	/* Prepare result descriptor */
	rdesc = safexcel_add_rdesc(priv, ring, true, true, 0, 0);
	if (IS_ERR(rdesc)) {
		ret = PTR_ERR(rdesc);
		goto cdesc_rollback;
	}

	safexcel_rdr_req_set(priv, ring, rdesc, async);

	return ret;

cdesc_rollback:
	safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);

	return ret;
}
static inline void safexcel_handle_result_descriptor(struct safexcel_crypto_priv *priv,
						     int ring)
{
	struct crypto_async_request *req;
	struct safexcel_context *ctx;
	int ret, i, nreq, ndesc, tot_descs, handled = 0;
	bool should_complete;

handle_results:
	tot_descs = 0;

	nreq = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	nreq >>= EIP197_xDR_PROC_xD_PKT_OFFSET;
	nreq &= EIP197_xDR_PROC_xD_PKT_MASK;
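	/*
	 * nreq now holds how many packets the engine reports processed since
	 * the last acknowledgment. The field saturates at
	 * EIP197_xDR_PROC_xD_PKT_MASK, hence the loop back to handle_results
	 * further down when a saturated value is read.
	 */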
	if (!nreq)
		goto requests_left;

	for (i = 0; i < nreq; i++) {
		req = safexcel_rdr_req_get(priv, ring);

		ctx = crypto_tfm_ctx(req->tfm);
		ndesc = ctx->handle_result(priv, ring, req,
					   &should_complete, &ret);
		if (ndesc < 0) {
			dev_err(priv->dev, "failed to handle result (%d)", ndesc);
			goto acknowledge;
		}

		if (should_complete) {
			local_bh_disable();
			req->complete(req, ret);
			local_bh_enable();
		}

		tot_descs += ndesc;
		handled++;
	}

acknowledge:
	if (i) {
		writel(EIP197_xDR_PROC_xD_PKT(i) |
		       EIP197_xDR_PROC_xD_COUNT(tot_descs * priv->config.rd_offset),
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_PROC_COUNT);
	}
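	/*
	 * The write above acknowledges the completed work: the engine
	 * decrements its processed packet and descriptor counters by the
	 * values written back.
	 */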
	/* If the number of requests overflowed the counter, try to process
	 * more requests.
	 */
	if (nreq == EIP197_xDR_PROC_xD_PKT_MASK)
		goto handle_results;

requests_left:
	spin_lock_bh(&priv->ring[ring].lock);

	priv->ring[ring].requests -= handled;
	safexcel_try_push_requests(priv, ring);

	if (!priv->ring[ring].requests)
		priv->ring[ring].busy = false;

	spin_unlock_bh(&priv->ring[ring].lock);
}
static void safexcel_dequeue_work(struct work_struct *work)
{
	struct safexcel_work_data *data =
			container_of(work, struct safexcel_work_data, work);

	safexcel_dequeue(data->priv, data->ring);
}
struct safexcel_ring_irq_data {
	struct safexcel_crypto_priv *priv;
	int ring;
};
static irqreturn_t safexcel_irq_ring(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring, rc = IRQ_NONE;
	u32 status, stat;

	status = readl(EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ENABLED_STAT(ring));
	if (!status)
		return rc;

	/* RDR interrupts */
	if (status & EIP197_RDR_IRQ(ring)) {
		stat = readl(EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);

		if (unlikely(stat & EIP197_xDR_ERR)) {
			/*
			 * Fatal error, the RDR is unusable and must be
			 * reinitialized. This should not happen under
			 * normal circumstances.
			 */
			dev_err(priv->dev, "RDR: fatal error.");
		} else if (likely(stat & EIP197_xDR_THRESH)) {
			rc = IRQ_WAKE_THREAD;
		}

		/* ACK the interrupts */
		writel(stat & 0xff,
		       EIP197_HIA_RDR(priv, ring) + EIP197_HIA_xDR_STAT);
	}

	/* ACK the interrupts */
	writel(status, EIP197_HIA_AIC_R(priv) + EIP197_HIA_AIC_R_ACK(ring));

	return rc;
}
static irqreturn_t safexcel_irq_ring_thread(int irq, void *data)
{
	struct safexcel_ring_irq_data *irq_data = data;
	struct safexcel_crypto_priv *priv = irq_data->priv;
	int ring = irq_data->ring;

	safexcel_handle_result_descriptor(priv, ring);

	queue_work(priv->ring[ring].workqueue,
		   &priv->ring[ring].work_data.work);

	return IRQ_HANDLED;
}
static int safexcel_request_ring_irq(struct platform_device *pdev, const char *name,
				     irq_handler_t handler,
				     irq_handler_t threaded_handler,
				     struct safexcel_ring_irq_data *ring_irq_priv)
{
	int ret, irq = platform_get_irq_byname(pdev, name);

	if (irq < 0) {
		dev_err(&pdev->dev, "unable to get IRQ '%s'\n", name);
		return irq;
	}

	ret = devm_request_threaded_irq(&pdev->dev, irq, handler,
					threaded_handler, IRQF_ONESHOT,
					dev_name(&pdev->dev), ring_irq_priv);
	if (ret) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", irq);
		return ret;
	}

	return irq;
}
static struct safexcel_alg_template *safexcel_algs[] = {
	&safexcel_alg_ecb_des,
	&safexcel_alg_cbc_des,
	&safexcel_alg_ecb_des3_ede,
	&safexcel_alg_cbc_des3_ede,
	&safexcel_alg_ecb_aes,
	&safexcel_alg_cbc_aes,
	&safexcel_alg_md5,
	&safexcel_alg_sha1,
	&safexcel_alg_sha224,
	&safexcel_alg_sha256,
	&safexcel_alg_sha384,
	&safexcel_alg_sha512,
	&safexcel_alg_hmac_md5,
	&safexcel_alg_hmac_sha1,
	&safexcel_alg_hmac_sha224,
	&safexcel_alg_hmac_sha256,
	&safexcel_alg_hmac_sha384,
	&safexcel_alg_hmac_sha512,
	&safexcel_alg_authenc_hmac_sha1_cbc_aes,
	&safexcel_alg_authenc_hmac_sha224_cbc_aes,
	&safexcel_alg_authenc_hmac_sha256_cbc_aes,
	&safexcel_alg_authenc_hmac_sha384_cbc_aes,
	&safexcel_alg_authenc_hmac_sha512_cbc_aes,
};
static int safexcel_register_algorithms(struct safexcel_crypto_priv *priv)
{
	int i, j, ret = 0;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		safexcel_algs[i]->priv = priv;

		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			ret = crypto_register_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			ret = crypto_register_aead(&safexcel_algs[i]->alg.aead);
		else
			ret = crypto_register_ahash(&safexcel_algs[i]->alg.ahash);

		if (ret)
			goto fail;
	}

	return 0;

fail:
	for (j = 0; j < i; j++) {
		if (!(safexcel_algs[j]->engines & priv->version))
			continue;

		if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[j]->alg.skcipher);
		else if (safexcel_algs[j]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[j]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[j]->alg.ahash);
	}

	return ret;
}
static void safexcel_unregister_algorithms(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(safexcel_algs); i++) {
		if (!(safexcel_algs[i]->engines & priv->version))
			continue;

		if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_SKCIPHER)
			crypto_unregister_skcipher(&safexcel_algs[i]->alg.skcipher);
		else if (safexcel_algs[i]->type == SAFEXCEL_ALG_TYPE_AEAD)
			crypto_unregister_aead(&safexcel_algs[i]->alg.aead);
		else
			crypto_unregister_ahash(&safexcel_algs[i]->alg.ahash);
	}
}
static void safexcel_configure(struct safexcel_crypto_priv *priv)
{
	u32 val, mask = 0;

	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);

	/* Read number of PEs from the engine */
	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		mask = EIP197_N_PES_MASK;
		break;
	default:
		mask = EIP97_N_PES_MASK;
	}
	priv->config.pes = (val >> EIP197_N_PES_OFFSET) & mask;

	val = (val & GENMASK(27, 25)) >> 25;
	mask = BIT(val) - 1;
	val = readl(EIP197_HIA_AIC_G(priv) + EIP197_HIA_OPTIONS);
	priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
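	/*
	 * mask is BIT(data width) - 1 as computed above; the offsets below
	 * are the descriptor sizes (in 32-bit words) rounded up to a multiple
	 * of the host interface data width, i.e. the per-descriptor stride
	 * used in the rings.
	 */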
	priv->config.cd_size = (sizeof(struct safexcel_command_desc) / sizeof(u32));
	priv->config.cd_offset = (priv->config.cd_size + mask) & ~mask;

	priv->config.rd_size = (sizeof(struct safexcel_result_desc) / sizeof(u32));
	priv->config.rd_offset = (priv->config.rd_size + mask) & ~mask;
}
static void safexcel_init_register_offsets(struct safexcel_crypto_priv *priv)
{
	struct safexcel_register_offsets *offsets = &priv->offsets;

	switch (priv->version) {
	case EIP197B:
	case EIP197D:
		offsets->hia_aic = EIP197_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP197_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP197_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP197_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP197_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP197_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP197_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP197_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP197_HIA_GEN_CFG_BASE;
		offsets->pe = EIP197_PE_BASE;
		break;
	case EIP97IES:
		offsets->hia_aic = EIP97_HIA_AIC_BASE;
		offsets->hia_aic_g = EIP97_HIA_AIC_G_BASE;
		offsets->hia_aic_r = EIP97_HIA_AIC_R_BASE;
		offsets->hia_aic_xdr = EIP97_HIA_AIC_xDR_BASE;
		offsets->hia_dfe = EIP97_HIA_DFE_BASE;
		offsets->hia_dfe_thr = EIP97_HIA_DFE_THR_BASE;
		offsets->hia_dse = EIP97_HIA_DSE_BASE;
		offsets->hia_dse_thr = EIP97_HIA_DSE_THR_BASE;
		offsets->hia_gen_cfg = EIP97_HIA_GEN_CFG_BASE;
		offsets->pe = EIP97_PE_BASE;
		break;
	}
}
static int safexcel_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct safexcel_crypto_priv *priv;
	int i, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->version = (enum safexcel_eip_version)of_device_get_match_data(dev);

	if (priv->version == EIP197B || priv->version == EIP197D)
		priv->flags |= EIP197_TRC_CACHE;
	safexcel_init_register_offsets(priv);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->base)) {
		dev_err(dev, "failed to get resource\n");
		return PTR_ERR(priv->base);
	}
	priv->clk = devm_clk_get(&pdev->dev, NULL);
	ret = PTR_ERR_OR_ZERO(priv->clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			return ret;

		ret = clk_prepare_enable(priv->clk);
		if (ret) {
			dev_err(dev, "unable to enable clk (%d)\n", ret);
			return ret;
		}
	}
	priv->reg_clk = devm_clk_get(&pdev->dev, "reg");
	ret = PTR_ERR_OR_ZERO(priv->reg_clk);
	/* The clock isn't mandatory */
	if (ret != -ENOENT) {
		if (ret)
			goto err_core_clk;

		ret = clk_prepare_enable(priv->reg_clk);
		if (ret) {
			dev_err(dev, "unable to enable reg clk (%d)\n", ret);
			goto err_core_clk;
		}
	}
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_reg_clk;

	priv->context_pool = dmam_pool_create("safexcel-context", dev,
					      sizeof(struct safexcel_context_record),
					      1, 0);
	if (!priv->context_pool) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}
	safexcel_configure(priv);

	priv->ring = devm_kzalloc(dev, priv->config.rings * sizeof(*priv->ring),
				  GFP_KERNEL);
	if (!priv->ring) {
		ret = -ENOMEM;
		goto err_reg_clk;
	}
	for (i = 0; i < priv->config.rings; i++) {
		char irq_name[6] = {0}; /* "ringX\0" */
		char wq_name[9] = {0}; /* "wq_ringX\0" */
		int irq;
		struct safexcel_ring_irq_data *ring_irq;

		ret = safexcel_init_ring_descriptors(priv,
						     &priv->ring[i].cdr,
						     &priv->ring[i].rdr);
		if (ret)
			goto err_reg_clk;
		priv->ring[i].rdr_req = devm_kzalloc(dev,
			sizeof(priv->ring[i].rdr_req) * EIP197_DEFAULT_RING_SIZE,
			GFP_KERNEL);
		if (!priv->ring[i].rdr_req) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}
		ring_irq = devm_kzalloc(dev, sizeof(*ring_irq), GFP_KERNEL);
		if (!ring_irq) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		ring_irq->priv = priv;
		ring_irq->ring = i;

		snprintf(irq_name, 6, "ring%d", i);
		irq = safexcel_request_ring_irq(pdev, irq_name, safexcel_irq_ring,
						safexcel_irq_ring_thread,
						ring_irq);
		if (irq < 0) {
			ret = irq;
			goto err_reg_clk;
		}
		priv->ring[i].work_data.priv = priv;
		priv->ring[i].work_data.ring = i;
		INIT_WORK(&priv->ring[i].work_data.work, safexcel_dequeue_work);

		snprintf(wq_name, 9, "wq_ring%d", i);
		priv->ring[i].workqueue = create_singlethread_workqueue(wq_name);
		if (!priv->ring[i].workqueue) {
			ret = -ENOMEM;
			goto err_reg_clk;
		}

		priv->ring[i].requests = 0;
		priv->ring[i].busy = false;

		crypto_init_queue(&priv->ring[i].queue,
				  EIP197_DEFAULT_RING_SIZE);

		spin_lock_init(&priv->ring[i].lock);
		spin_lock_init(&priv->ring[i].queue_lock);
	}
	platform_set_drvdata(pdev, priv);
	atomic_set(&priv->ring_used, 0);
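	/*
	 * ring_used is the counter behind the ring selection helper in
	 * safexcel.h (safexcel_select_ring()), which spreads new transforms
	 * across rings round-robin via an atomic increment modulo the number
	 * of rings.
	 */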
	ret = safexcel_hw_init(priv);
	if (ret) {
		dev_err(dev, "EIP h/w init failed (%d)\n", ret);
		goto err_reg_clk;
	}

	ret = safexcel_register_algorithms(priv);
	if (ret) {
		dev_err(dev, "Failed to register algorithms (%d)\n", ret);
		goto err_reg_clk;
	}

	return 0;

err_reg_clk:
	clk_disable_unprepare(priv->reg_clk);
err_core_clk:
	clk_disable_unprepare(priv->clk);
	return ret;
}
static void safexcel_hw_reset_rings(struct safexcel_crypto_priv *priv)
{
	int i;

	for (i = 0; i < priv->config.rings; i++) {
		/* clear any pending interrupt */
		writel(GENMASK(5, 0), EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_STAT);
		writel(GENMASK(7, 0), EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_STAT);

		/* Reset the CDR base address */
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_CDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);

		/* Reset the RDR base address */
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_LO);
		writel(0, EIP197_HIA_RDR(priv, i) + EIP197_HIA_xDR_RING_BASE_ADDR_HI);
	}
}
static int safexcel_remove(struct platform_device *pdev)
{
	struct safexcel_crypto_priv *priv = platform_get_drvdata(pdev);
	int i;

	safexcel_unregister_algorithms(priv);
	safexcel_hw_reset_rings(priv);

	clk_disable_unprepare(priv->clk);

	for (i = 0; i < priv->config.rings; i++)
		destroy_workqueue(priv->ring[i].workqueue);

	return 0;
}
static const struct of_device_id safexcel_of_match_table[] = {
	{
		.compatible = "inside-secure,safexcel-eip97ies",
		.data = (void *)EIP97IES,
	},
	{
		.compatible = "inside-secure,safexcel-eip197b",
		.data = (void *)EIP197B,
	},
	{
		.compatible = "inside-secure,safexcel-eip197d",
		.data = (void *)EIP197D,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip97",
		.data = (void *)EIP97IES,
	},
	{
		/* Deprecated. Kept for backward compatibility. */
		.compatible = "inside-secure,safexcel-eip197",
		.data = (void *)EIP197B,
	},
	{},
};
static struct platform_driver crypto_safexcel = {
	.probe = safexcel_probe,
	.remove = safexcel_remove,
	.driver = {
		.name = "crypto-safexcel",
		.of_match_table = safexcel_of_match_table,
	},
};
module_platform_driver(crypto_safexcel);
MODULE_DESCRIPTION("Support for SafeXcel cryptographic engine EIP197");
MODULE_LICENSE("GPL v2");