drivers/crypto/picoxcell_crypto.c
/*
 * Copyright (c) 2010-2011 Picochip Ltd., Jamie Iles
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/skcipher.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/rtnetlink.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/timer.h>

#include "picoxcell_crypto_regs.h"

/*
 * The threshold for the number of entries in the CMD FIFO available before
 * the CMD0_CNT interrupt is raised. Increasing this value will reduce the
 * number of interrupts raised to the CPU.
 */
#define CMD0_IRQ_THRESHOLD   1

/*
 * The timeout period (in jiffies) for a PDU. When the number of PDUs in
 * flight is greater than the STAT_IRQ_THRESHOLD, or is zero, the timer is
 * disabled. When there are packets in flight but fewer than the threshold,
 * we enable the timer and, at expiry, attempt to remove any processed
 * packets from the queue; if there are still packets left, we schedule the
 * timer again.
 */
#define PACKET_TIMEOUT      1

/* The priority to register each algorithm with. */
#define SPACC_CRYPTO_ALG_PRIORITY       10000

#define SPACC_CRYPTO_KASUMI_F8_KEY_LEN  16
#define SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ 64
#define SPACC_CRYPTO_IPSEC_HASH_PG_SZ   64
#define SPACC_CRYPTO_IPSEC_MAX_CTXS     32
#define SPACC_CRYPTO_IPSEC_FIFO_SZ      32
#define SPACC_CRYPTO_L2_CIPHER_PG_SZ    64
#define SPACC_CRYPTO_L2_HASH_PG_SZ      64
#define SPACC_CRYPTO_L2_MAX_CTXS        128
#define SPACC_CRYPTO_L2_FIFO_SZ         128

#define MAX_DDT_LEN                     16

/* DDT format. This must match the hardware DDT format exactly. */
struct spacc_ddt {
        dma_addr_t      p;
        u32             len;
};

/*
 * Asynchronous crypto request structure.
 *
 * This structure defines a request that is either queued for processing or
 * being processed.
 */
struct spacc_req {
        struct list_head                list;
        struct spacc_engine             *engine;
        struct crypto_async_request     *req;
        int                             result;
        bool                            is_encrypt;
        unsigned                        ctx_id;
        dma_addr_t                      src_addr, dst_addr;
        struct spacc_ddt                *src_ddt, *dst_ddt;
        void                            (*complete)(struct spacc_req *req);
};

struct spacc_aead {
        unsigned long                   ctrl_default;
        unsigned long                   type;
        struct aead_alg                 alg;
        struct spacc_engine             *engine;
        struct list_head                entry;
        int                             key_offs;
        int                             iv_offs;
};

struct spacc_engine {
        void __iomem                    *regs;
        struct list_head                pending;
        int                             next_ctx;
        spinlock_t                      hw_lock;
        int                             in_flight;
        struct list_head                completed;
        struct list_head                in_progress;
        struct tasklet_struct           complete;
        unsigned long                   fifo_sz;
        void __iomem                    *cipher_ctx_base;
        void __iomem                    *hash_key_base;
        struct spacc_alg                *algs;
        unsigned                        num_algs;
        struct list_head                registered_algs;
        struct spacc_aead               *aeads;
        unsigned                        num_aeads;
        struct list_head                registered_aeads;
        size_t                          cipher_pg_sz;
        size_t                          hash_pg_sz;
        const char                      *name;
        struct clk                      *clk;
        struct device                   *dev;
        unsigned                        max_ctxs;
        struct timer_list               packet_timeout;
        unsigned                        stat_irq_thresh;
        struct dma_pool                 *req_pool;
};

/* Algorithm type mask. */
#define SPACC_CRYPTO_ALG_MASK           0x7

/* SPACC definition of a crypto algorithm. */
struct spacc_alg {
        unsigned long                   ctrl_default;
        unsigned long                   type;
        struct crypto_alg               alg;
        struct spacc_engine             *engine;
        struct list_head                entry;
        int                             key_offs;
        int                             iv_offs;
};

/* Generic context structure for any algorithm type. */
struct spacc_generic_ctx {
        struct spacc_engine             *engine;
        int                             flags;
        int                             key_offs;
        int                             iv_offs;
};

/* Block cipher context. */
struct spacc_ablk_ctx {
        struct spacc_generic_ctx        generic;
        u8                              key[AES_MAX_KEY_SIZE];
        u8                              key_len;
        /*
         * The fallback cipher. If the operation can't be done in hardware,
         * fall back to a software version.
         */
        struct crypto_sync_skcipher     *sw_cipher;
};

/* AEAD cipher context. */
struct spacc_aead_ctx {
        struct spacc_generic_ctx        generic;
        u8                              cipher_key[AES_MAX_KEY_SIZE];
        u8                              hash_ctx[SPACC_CRYPTO_IPSEC_HASH_PG_SZ];
        u8                              cipher_key_len;
        u8                              hash_key_len;
        struct crypto_aead              *sw_cipher;
};

static int spacc_ablk_submit(struct spacc_req *req);

static inline struct spacc_alg *to_spacc_alg(struct crypto_alg *alg)
{
        return alg ? container_of(alg, struct spacc_alg, alg) : NULL;
}

static inline struct spacc_aead *to_spacc_aead(struct aead_alg *alg)
{
        return container_of(alg, struct spacc_aead, alg);
}

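/*
 * Check whether the command FIFO has room for another entry. Returns
 * non-zero when the FIFO is full, in which case the request must be
 * queued rather than written straight to the hardware.
 */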
static inline int spacc_fifo_cmd_full(struct spacc_engine *engine)
{
        u32 fifo_stat = readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET);

        return fifo_stat & SPA_FIFO_CMD_FULL;
}

/*
 * Given a cipher context and a context number, get the base address of the
 * context page.
 *
 * Returns the address of the context page where the key/context may
 * be written.
 */
static inline void __iomem *spacc_ctx_page_addr(struct spacc_generic_ctx *ctx,
                                                unsigned indx,
                                                bool is_cipher_ctx)
{
        return is_cipher_ctx ? ctx->engine->cipher_ctx_base +
                        (indx * ctx->engine->cipher_pg_sz) :
                ctx->engine->hash_key_base + (indx * ctx->engine->hash_pg_sz);
}

/* The context pages can only be written with 32-bit accesses. */
static inline void memcpy_toio32(u32 __iomem *dst, const void *src,
                                 unsigned count)
{
        const u32 *src32 = (const u32 *) src;

        while (count--)
                writel(*src32++, dst++);
}

static void spacc_cipher_write_ctx(struct spacc_generic_ctx *ctx,
                                   void __iomem *page_addr, const u8 *key,
                                   size_t key_len, const u8 *iv, size_t iv_len)
{
        void __iomem *key_ptr = page_addr + ctx->key_offs;
        void __iomem *iv_ptr = page_addr + ctx->iv_offs;

        memcpy_toio32(key_ptr, key, key_len / 4);
        memcpy_toio32(iv_ptr, iv, iv_len / 4);
}

/*
 * Load a context into the engine's context memory.
 *
 * Returns the index of the context page where the context was loaded.
 */
static unsigned spacc_load_ctx(struct spacc_generic_ctx *ctx,
                               const u8 *ciph_key, size_t ciph_len,
                               const u8 *iv, size_t ivlen, const u8 *hash_key,
                               size_t hash_len)
{
        unsigned indx = ctx->engine->next_ctx++;
        void __iomem *ciph_page_addr, *hash_page_addr;

        ciph_page_addr = spacc_ctx_page_addr(ctx, indx, 1);
        hash_page_addr = spacc_ctx_page_addr(ctx, indx, 0);

        ctx->engine->next_ctx &= ctx->engine->fifo_sz - 1;
        spacc_cipher_write_ctx(ctx, ciph_page_addr, ciph_key, ciph_len, iv,
                               ivlen);
        writel(ciph_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET) |
               (1 << SPA_KEY_SZ_CIPHER_OFFSET),
               ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);

        if (hash_key) {
                memcpy_toio32(hash_page_addr, hash_key, hash_len / 4);
                writel(hash_len | (indx << SPA_KEY_SZ_CTX_INDEX_OFFSET),
                       ctx->engine->regs + SPA_KEY_SZ_REG_OFFSET);
        }

        return indx;
}

static inline void ddt_set(struct spacc_ddt *ddt, dma_addr_t phys, size_t len)
{
        ddt->p = phys;
        ddt->len = len;
}

/*
 * Take a crypto request and scatterlists for the data and turn them into DDTs
 * for passing to the crypto engines. This also DMA maps the data so that the
 * crypto engines can DMA to/from them.
 */
static struct spacc_ddt *spacc_sg_to_ddt(struct spacc_engine *engine,
                                         struct scatterlist *payload,
                                         unsigned nbytes,
                                         enum dma_data_direction dir,
                                         dma_addr_t *ddt_phys)
{
        unsigned mapped_ents;
        struct scatterlist *cur;
        struct spacc_ddt *ddt;
        int i;
        int nents;

        nents = sg_nents_for_len(payload, nbytes);
        if (nents < 0) {
                dev_err(engine->dev, "Invalid number of SG entries.\n");
                return NULL;
        }
        mapped_ents = dma_map_sg(engine->dev, payload, nents, dir);

        if (mapped_ents + 1 > MAX_DDT_LEN)
                goto out;

        ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, ddt_phys);
        if (!ddt)
                goto out;

        for_each_sg(payload, cur, mapped_ents, i)
                ddt_set(&ddt[i], sg_dma_address(cur), sg_dma_len(cur));
        ddt_set(&ddt[mapped_ents], 0, 0);

        return ddt;

out:
        dma_unmap_sg(engine->dev, payload, nents, dir);
        return NULL;
}

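/*
 * Build the source and destination DDT lists for an AEAD request and DMA map
 * the scatterlists. In-place operations share a single bidirectional
 * mapping; for decryption the destination list skips over the associated
 * data.
 */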
static int spacc_aead_make_ddts(struct aead_request *areq)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
        struct spacc_req *req = aead_request_ctx(areq);
        struct spacc_engine *engine = req->engine;
        struct spacc_ddt *src_ddt, *dst_ddt;
        unsigned total;
        int src_nents, dst_nents;
        struct scatterlist *cur;
        int i, dst_ents, src_ents;

        total = areq->assoclen + areq->cryptlen;
        if (req->is_encrypt)
                total += crypto_aead_authsize(aead);

        src_nents = sg_nents_for_len(areq->src, total);
        if (src_nents < 0) {
                dev_err(engine->dev, "Invalid number of src SG entries.\n");
                return src_nents;
        }
        if (src_nents + 1 > MAX_DDT_LEN)
                return -E2BIG;

        dst_nents = 0;
        if (areq->src != areq->dst) {
                dst_nents = sg_nents_for_len(areq->dst, total);
                if (dst_nents < 0) {
                        dev_err(engine->dev, "Invalid number of dst SG entries.\n");
                        return dst_nents;
                }
                if (dst_nents + 1 > MAX_DDT_LEN)
                        return -E2BIG;
        }

        src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr);
        if (!src_ddt)
                goto err;

        dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr);
        if (!dst_ddt)
                goto err_free_src;

        req->src_ddt = src_ddt;
        req->dst_ddt = dst_ddt;

        if (dst_nents) {
                src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
                                      DMA_TO_DEVICE);
                if (!src_ents)
                        goto err_free_dst;

                dst_ents = dma_map_sg(engine->dev, areq->dst, dst_nents,
                                      DMA_FROM_DEVICE);

                if (!dst_ents) {
                        dma_unmap_sg(engine->dev, areq->src, src_nents,
                                     DMA_TO_DEVICE);
                        goto err_free_dst;
                }
        } else {
                src_ents = dma_map_sg(engine->dev, areq->src, src_nents,
                                      DMA_BIDIRECTIONAL);
                if (!src_ents)
                        goto err_free_dst;
                dst_ents = src_ents;
        }

        /*
         * Fill in the DDT entries for the source and destination, then
         * terminate each list with a zero entry.
         */
        for_each_sg(areq->src, cur, src_ents, i)
                ddt_set(src_ddt++, sg_dma_address(cur), sg_dma_len(cur));

        /* For decryption we need to skip the associated data. */
        total = req->is_encrypt ? 0 : areq->assoclen;
        for_each_sg(areq->dst, cur, dst_ents, i) {
                unsigned len = sg_dma_len(cur);

                if (len <= total) {
                        total -= len;
                        continue;
                }

                ddt_set(dst_ddt++, sg_dma_address(cur) + total, len - total);
        }

        ddt_set(src_ddt, 0, 0);
        ddt_set(dst_ddt, 0, 0);

        return 0;

err_free_dst:
        dma_pool_free(engine->req_pool, dst_ddt, req->dst_addr);
err_free_src:
        dma_pool_free(engine->req_pool, src_ddt, req->src_addr);
err:
        return -ENOMEM;
}

static void spacc_aead_free_ddts(struct spacc_req *req)
{
        struct aead_request *areq = container_of(req->req, struct aead_request,
                                                 base);
        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
        unsigned total = areq->assoclen + areq->cryptlen +
                         (req->is_encrypt ? crypto_aead_authsize(aead) : 0);
        struct spacc_aead_ctx *aead_ctx = crypto_aead_ctx(aead);
        struct spacc_engine *engine = aead_ctx->generic.engine;
        int nents = sg_nents_for_len(areq->src, total);

        /*
         * sg_nents_for_len() should not fail here since it succeeded when
         * the scatterlists were first mapped.
         */
        if (unlikely(nents < 0)) {
                dev_err(engine->dev, "Invalid number of src SG entries.\n");
                return;
        }

        if (areq->src != areq->dst) {
                dma_unmap_sg(engine->dev, areq->src, nents, DMA_TO_DEVICE);
                nents = sg_nents_for_len(areq->dst, total);
                if (unlikely(nents < 0)) {
                        dev_err(engine->dev, "Invalid number of dst SG entries.\n");
                        return;
                }
                dma_unmap_sg(engine->dev, areq->dst, nents, DMA_FROM_DEVICE);
        } else
                dma_unmap_sg(engine->dev, areq->src, nents, DMA_BIDIRECTIONAL);

        dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr);
        dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr);
}

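/*
 * Unmap a previously mapped scatterlist and return its DDT list to the DMA
 * pool.
 */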
static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt,
                           dma_addr_t ddt_addr, struct scatterlist *payload,
                           unsigned nbytes, enum dma_data_direction dir)
{
        int nents = sg_nents_for_len(payload, nbytes);

        if (nents < 0) {
                dev_err(req->engine->dev, "Invalid number of SG entries.\n");
                return;
        }

        dma_unmap_sg(req->engine->dev, payload, nents, dir);
        dma_pool_free(req->engine->req_pool, ddt, ddt_addr);
}

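/*
 * Set the key for an AEAD transform. The authenc key blob is split into its
 * cipher and hash parts, and the key is also installed into the software
 * fallback so that either path can process a given request.
 */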
static int spacc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
                             unsigned int keylen)
{
        struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_authenc_keys keys;
        int err;

        crypto_aead_clear_flags(ctx->sw_cipher, CRYPTO_TFM_REQ_MASK);
        crypto_aead_set_flags(ctx->sw_cipher, crypto_aead_get_flags(tfm) &
                                              CRYPTO_TFM_REQ_MASK);
        err = crypto_aead_setkey(ctx->sw_cipher, key, keylen);
        crypto_aead_clear_flags(tfm, CRYPTO_TFM_RES_MASK);
        crypto_aead_set_flags(tfm, crypto_aead_get_flags(ctx->sw_cipher) &
                                   CRYPTO_TFM_RES_MASK);
        if (err)
                return err;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        if (keys.enckeylen > AES_MAX_KEY_SIZE)
                goto badkey;

        if (keys.authkeylen > sizeof(ctx->hash_ctx))
                goto badkey;

        memcpy(ctx->cipher_key, keys.enckey, keys.enckeylen);
        ctx->cipher_key_len = keys.enckeylen;

        memcpy(ctx->hash_ctx, keys.authkey, keys.authkeylen);
        ctx->hash_key_len = keys.authkeylen;

        memzero_explicit(&keys, sizeof(keys));
        return 0;

badkey:
        crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
        memzero_explicit(&keys, sizeof(keys));
        return -EINVAL;
}

static int spacc_aead_setauthsize(struct crypto_aead *tfm,
                                  unsigned int authsize)
{
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(crypto_aead_tfm(tfm));

        return crypto_aead_setauthsize(ctx->sw_cipher, authsize);
}

/*
 * Check if an AEAD request requires a fallback operation. Some requests can't
 * be completed in hardware because the hardware may not support certain key
 * sizes. In these cases we need to complete the request in software.
 */
static int spacc_aead_need_fallback(struct aead_request *aead_req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct spacc_aead *spacc_alg = to_spacc_aead(alg);
        struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);

        /*
         * If we have an unsupported key length, then we need to do a
         * software fallback.
         */
        if ((spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
            SPA_CTRL_CIPH_ALG_AES &&
            ctx->cipher_key_len != AES_KEYSIZE_128 &&
            ctx->cipher_key_len != AES_KEYSIZE_256)
                return 1;

        return 0;
}

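/*
 * Process an AEAD request with the software fallback transform. The fallback
 * request area was reserved by crypto_aead_set_reqsize() in
 * spacc_aead_cra_init().
 */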
static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type,
                                  bool is_encrypt)
{
        struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req));
        struct spacc_aead_ctx *ctx = crypto_tfm_ctx(old_tfm);
        struct aead_request *subreq = aead_request_ctx(req);

        aead_request_set_tfm(subreq, ctx->sw_cipher);
        aead_request_set_callback(subreq, req->base.flags,
                                  req->base.complete, req->base.data);
        aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                               req->iv);
        aead_request_set_ad(subreq, req->assoclen);

        return is_encrypt ? crypto_aead_encrypt(subreq) :
                            crypto_aead_decrypt(subreq);
}

static void spacc_aead_complete(struct spacc_req *req)
{
        spacc_aead_free_ddts(req);
        req->req->complete(req->req, req->result);
}

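/*
 * Submit an AEAD request to the hardware: load the key and IV into a context
 * page, program the DDT pointers and lengths, then write the control word to
 * start processing. Completion is signalled by interrupt or by the packet
 * timeout timer.
 */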
static int spacc_aead_submit(struct spacc_req *req)
{
        struct aead_request *aead_req =
                container_of(req->req, struct aead_request, base);
        struct crypto_aead *aead = crypto_aead_reqtfm(aead_req);
        unsigned int authsize = crypto_aead_authsize(aead);
        struct spacc_aead_ctx *ctx = crypto_aead_ctx(aead);
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct spacc_aead *spacc_alg = to_spacc_aead(alg);
        struct spacc_engine *engine = ctx->generic.engine;
        u32 ctrl, proc_len, assoc_len;

        req->result = -EINPROGRESS;
        req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key,
                ctx->cipher_key_len, aead_req->iv, crypto_aead_ivsize(aead),
                ctx->hash_ctx, ctx->hash_key_len);

        /* Set the source and destination DDT pointers. */
        writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
        writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
        writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

        assoc_len = aead_req->assoclen;
        proc_len = aead_req->cryptlen + assoc_len;

        /*
         * If we are decrypting, we need to take the length of the ICV out of
         * the processing length.
         */
        if (!req->is_encrypt)
                proc_len -= authsize;

        writel(proc_len, engine->regs + SPA_PROC_LEN_REG_OFFSET);
        writel(assoc_len, engine->regs + SPA_AAD_LEN_REG_OFFSET);
        writel(authsize, engine->regs + SPA_ICV_LEN_REG_OFFSET);
        writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
        writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);

        ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
                (1 << SPA_CTRL_ICV_APPEND);
        if (req->is_encrypt)
                ctrl |= (1 << SPA_CTRL_ENCRYPT_IDX) | (1 << SPA_CTRL_AAD_COPY);
        else
                ctrl |= (1 << SPA_CTRL_KEY_EXP);

        mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

        writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

        return -EINPROGRESS;
}

static int spacc_req_submit(struct spacc_req *req);

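/*
 * Push pending requests into the hardware while there is space in the
 * command FIFO. Must be called with the hw_lock held.
 */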
static void spacc_push(struct spacc_engine *engine)
{
        struct spacc_req *req;

        while (!list_empty(&engine->pending) &&
               engine->in_flight + 1 <= engine->fifo_sz) {

                ++engine->in_flight;
                req = list_first_entry(&engine->pending, struct spacc_req,
                                       list);
                list_move_tail(&req->list, &engine->in_progress);

                req->result = spacc_req_submit(req);
        }
}

/*
 * Set up an AEAD request for processing. This will configure the engine, load
 * the context and then start the packet processing.
 */
static int spacc_aead_setup(struct aead_request *req,
                            unsigned alg_type, bool is_encrypt)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct aead_alg *alg = crypto_aead_alg(aead);
        struct spacc_engine *engine = to_spacc_aead(alg)->engine;
        struct spacc_req *dev_req = aead_request_ctx(req);
        int err;
        unsigned long flags;

        dev_req->req            = &req->base;
        dev_req->is_encrypt     = is_encrypt;
        dev_req->result         = -EBUSY;
        dev_req->engine         = engine;
        dev_req->complete       = spacc_aead_complete;

        if (unlikely(spacc_aead_need_fallback(req) ||
                     ((err = spacc_aead_make_ddts(req)) == -E2BIG)))
                return spacc_aead_do_fallback(req, alg_type, is_encrypt);

        if (err)
                goto out;

        err = -EINPROGRESS;
        spin_lock_irqsave(&engine->hw_lock, flags);
        if (unlikely(spacc_fifo_cmd_full(engine)) ||
            engine->in_flight + 1 > engine->fifo_sz) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -EBUSY;
                        spin_unlock_irqrestore(&engine->hw_lock, flags);
                        goto out_free_ddts;
                }
                list_add_tail(&dev_req->list, &engine->pending);
        } else {
                list_add_tail(&dev_req->list, &engine->pending);
                spacc_push(engine);
        }
        spin_unlock_irqrestore(&engine->hw_lock, flags);

        goto out;

out_free_ddts:
        spacc_aead_free_ddts(dev_req);
out:
        return err;
}

static int spacc_aead_encrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct spacc_aead *alg = to_spacc_aead(crypto_aead_alg(aead));

        return spacc_aead_setup(req, alg->type, 1);
}

static int spacc_aead_decrypt(struct aead_request *req)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(req);
        struct spacc_aead  *alg = to_spacc_aead(crypto_aead_alg(aead));

        return spacc_aead_setup(req, alg->type, 0);
}

/*
 * Initialise a new AEAD context. This is responsible for allocating the
 * fallback cipher and initialising the context.
 */
static int spacc_aead_cra_init(struct crypto_aead *tfm)
{
        struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_alg *alg = crypto_aead_alg(tfm);
        struct spacc_aead *spacc_alg = to_spacc_aead(alg);
        struct spacc_engine *engine = spacc_alg->engine;

        ctx->generic.flags = spacc_alg->type;
        ctx->generic.engine = engine;
        ctx->sw_cipher = crypto_alloc_aead(alg->base.cra_name, 0,
                                           CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(ctx->sw_cipher))
                return PTR_ERR(ctx->sw_cipher);
        ctx->generic.key_offs = spacc_alg->key_offs;
        ctx->generic.iv_offs = spacc_alg->iv_offs;

        crypto_aead_set_reqsize(
                tfm,
                max(sizeof(struct spacc_req),
                    sizeof(struct aead_request) +
                    crypto_aead_reqsize(ctx->sw_cipher)));

        return 0;
}

/*
 * Destructor for an AEAD context. This is called when the transform is freed
 * and must free the fallback cipher.
 */
static void spacc_aead_cra_exit(struct crypto_aead *tfm)
{
        struct spacc_aead_ctx *ctx = crypto_aead_ctx(tfm);

        crypto_free_aead(ctx->sw_cipher);
}

/*
 * Set the DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                            unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        u32 tmp[DES_EXPKEY_WORDS];

        if (unlikely(!des_ekey(tmp, key)) &&
            (crypto_ablkcipher_get_flags(cipher) &
             CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
                tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
                return -EINVAL;
        }

        memcpy(ctx->key, key, len);
        ctx->key_len = len;

        return 0;
}

/*
 * Set the 3DES key for a block cipher transform. This also performs weak key
 * checking if the transform has requested it.
 */
static int spacc_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                             unsigned int len)
{
        struct spacc_ablk_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        u32 flags;
        int err;

        flags = crypto_ablkcipher_get_flags(cipher);
        err = __des3_verify_key(&flags, key);
        if (unlikely(err)) {
                crypto_ablkcipher_set_flags(cipher, flags);
                return err;
        }

        memcpy(ctx->key, key, len);
        ctx->key_len = len;

        return 0;
}

/*
 * Set the key for an AES block cipher. Some key lengths are not supported in
 * hardware so this must also check whether a fallback is needed.
 */
static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
                            unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        int err = 0;

        if (len > AES_MAX_KEY_SIZE) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        /*
         * The IPSec engine only supports 128- and 256-bit AES keys. If we
         * get a request for any other size (192 bits), then we need to do a
         * software fallback.
         */
        if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256) {
                if (!ctx->sw_cipher)
                        return -EINVAL;

                /*
                 * Set the fallback transform to use the same request flags as
                 * the hardware transform.
                 */
                crypto_sync_skcipher_clear_flags(ctx->sw_cipher,
                                            CRYPTO_TFM_REQ_MASK);
                crypto_sync_skcipher_set_flags(ctx->sw_cipher,
                                          cipher->base.crt_flags &
                                          CRYPTO_TFM_REQ_MASK);

                err = crypto_sync_skcipher_setkey(ctx->sw_cipher, key, len);

                tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
                tfm->crt_flags |=
                        crypto_sync_skcipher_get_flags(ctx->sw_cipher) &
                        CRYPTO_TFM_RES_MASK;

                if (err)
                        goto sw_setkey_failed;
        }

        memcpy(ctx->key, key, len);
        ctx->key_len = len;

sw_setkey_failed:
        return err;
}

static int spacc_kasumi_f8_setkey(struct crypto_ablkcipher *cipher,
                                  const u8 *key, unsigned int len)
{
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        int err = 0;

        if (len > AES_MAX_KEY_SIZE) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                err = -EINVAL;
                goto out;
        }

        memcpy(ctx->key, key, len);
        ctx->key_len = len;

out:
        return err;
}

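/*
 * Check whether a block cipher request must be handled by the software
 * fallback. This is the case for AES keys that are neither 128 nor 256 bits
 * wide, which the engine cannot process.
 */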
static int spacc_ablk_need_fallback(struct spacc_req *req)
{
        struct spacc_ablk_ctx *ctx;
        struct crypto_tfm *tfm = req->req->tfm;
        struct crypto_alg *alg = req->req->tfm->__crt_alg;
        struct spacc_alg *spacc_alg = to_spacc_alg(alg);

        ctx = crypto_tfm_ctx(tfm);

        return (spacc_alg->ctrl_default & SPACC_CRYPTO_ALG_MASK) ==
                        SPA_CTRL_CIPH_ALG_AES &&
                        ctx->key_len != AES_KEYSIZE_128 &&
                        ctx->key_len != AES_KEYSIZE_256;
}

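/*
 * Completion handler for block cipher requests: unmap the data, free the
 * DDT lists and complete the underlying request.
 */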
static void spacc_ablk_complete(struct spacc_req *req)
{
        struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);

        if (ablk_req->src != ablk_req->dst) {
                spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src,
                               ablk_req->nbytes, DMA_TO_DEVICE);
                spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
                               ablk_req->nbytes, DMA_FROM_DEVICE);
        } else
                spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst,
                               ablk_req->nbytes, DMA_BIDIRECTIONAL);

        req->req->complete(req->req, req->result);
}

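/*
 * Write a block cipher request out to the engine: load the key and IV into a
 * context page, program the DDT pointers and the processing length, then
 * start the operation.
 */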
static int spacc_ablk_submit(struct spacc_req *req)
{
        struct crypto_tfm *tfm = req->req->tfm;
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req);
        struct crypto_alg *alg = req->req->tfm->__crt_alg;
        struct spacc_alg *spacc_alg = to_spacc_alg(alg);
        struct spacc_engine *engine = ctx->generic.engine;
        u32 ctrl;

        req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key,
                ctx->key_len, ablk_req->info, alg->cra_ablkcipher.ivsize,
                NULL, 0);

        writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET);
        writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET);
        writel(0, engine->regs + SPA_OFFSET_REG_OFFSET);

        writel(ablk_req->nbytes, engine->regs + SPA_PROC_LEN_REG_OFFSET);
        writel(0, engine->regs + SPA_ICV_OFFSET_REG_OFFSET);
        writel(0, engine->regs + SPA_AUX_INFO_REG_OFFSET);
        writel(0, engine->regs + SPA_AAD_LEN_REG_OFFSET);

        ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) |
                (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) :
                 (1 << SPA_CTRL_KEY_EXP));

        mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

        writel(ctrl, engine->regs + SPA_CTRL_REG_OFFSET);

        return -EINPROGRESS;
}

static int spacc_ablk_do_fallback(struct ablkcipher_request *req,
                                  unsigned alg_type, bool is_encrypt)
{
        struct crypto_tfm *old_tfm =
            crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(old_tfm);
        SYNC_SKCIPHER_REQUEST_ON_STACK(subreq, ctx->sw_cipher);
        int err;

        /*
         * Run the request through the software fallback transform. The
         * fallback request lives on the stack, so the operation completes
         * synchronously.
         */
        skcipher_request_set_sync_tfm(subreq, ctx->sw_cipher);
        skcipher_request_set_callback(subreq, req->base.flags, NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst,
                                   req->nbytes, req->info);
        err = is_encrypt ? crypto_skcipher_encrypt(subreq) :
                           crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        return err;
}

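/*
 * Set up a block cipher request: build the DDTs and either hand the request
 * to the hardware or, if the command FIFO is full, queue it on the pending
 * list (failing with -EBUSY if backlogging is not allowed).
 */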
static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type,
                            bool is_encrypt)
{
        struct crypto_alg *alg = req->base.tfm->__crt_alg;
        struct spacc_engine *engine = to_spacc_alg(alg)->engine;
        struct spacc_req *dev_req = ablkcipher_request_ctx(req);
        unsigned long flags;
        int err = -ENOMEM;

        dev_req->req            = &req->base;
        dev_req->is_encrypt     = is_encrypt;
        dev_req->engine         = engine;
        dev_req->complete       = spacc_ablk_complete;
        dev_req->result         = -EINPROGRESS;

        if (unlikely(spacc_ablk_need_fallback(dev_req)))
                return spacc_ablk_do_fallback(req, alg_type, is_encrypt);

        /*
         * Create the DDTs for the engine. If the source and destination are
         * the same then we can optimize by reusing the DDTs.
         */
        if (req->src != req->dst) {
                dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src,
                        req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr);
                if (!dev_req->src_ddt)
                        goto out;

                dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
                        req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr);
                if (!dev_req->dst_ddt)
                        goto out_free_src;
        } else {
                dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst,
                        req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr);
                if (!dev_req->dst_ddt)
                        goto out;

                dev_req->src_ddt = NULL;
                dev_req->src_addr = dev_req->dst_addr;
        }

        err = -EINPROGRESS;
        spin_lock_irqsave(&engine->hw_lock, flags);
        /*
         * Check if the engine will accept the operation now. If it won't,
         * then we either stick it on the end of the pending list if we can
         * backlog, or bail out with an error if not.
         */
        if (unlikely(spacc_fifo_cmd_full(engine)) ||
            engine->in_flight + 1 > engine->fifo_sz) {
                if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
                        err = -EBUSY;
                        spin_unlock_irqrestore(&engine->hw_lock, flags);
                        goto out_free_ddts;
                }
                list_add_tail(&dev_req->list, &engine->pending);
        } else {
                list_add_tail(&dev_req->list, &engine->pending);
                spacc_push(engine);
        }
        spin_unlock_irqrestore(&engine->hw_lock, flags);

        goto out;

out_free_ddts:
        spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst,
                       req->nbytes, req->src == req->dst ?
                       DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
out_free_src:
        if (req->src != req->dst)
                spacc_free_ddt(dev_req, dev_req->src_ddt, dev_req->src_addr,
                               req->src, req->nbytes, DMA_TO_DEVICE);
out:
        return err;
}

static int spacc_ablk_cra_init(struct crypto_tfm *tfm)
{
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_alg *alg = tfm->__crt_alg;
        struct spacc_alg *spacc_alg = to_spacc_alg(alg);
        struct spacc_engine *engine = spacc_alg->engine;

        ctx->generic.flags = spacc_alg->type;
        ctx->generic.engine = engine;
        if (alg->cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
                ctx->sw_cipher = crypto_alloc_sync_skcipher(
                        alg->cra_name, 0, CRYPTO_ALG_NEED_FALLBACK);
                if (IS_ERR(ctx->sw_cipher)) {
                        dev_warn(engine->dev, "failed to allocate fallback for %s\n",
                                 alg->cra_name);
                        return PTR_ERR(ctx->sw_cipher);
                }
        }
        ctx->generic.key_offs = spacc_alg->key_offs;
        ctx->generic.iv_offs = spacc_alg->iv_offs;

        tfm->crt_ablkcipher.reqsize = sizeof(struct spacc_req);

        return 0;
}

static void spacc_ablk_cra_exit(struct crypto_tfm *tfm)
{
        struct spacc_ablk_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_sync_skcipher(ctx->sw_cipher);
}

static int spacc_ablk_encrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

        return spacc_ablk_setup(req, alg->type, 1);
}

static int spacc_ablk_decrypt(struct ablkcipher_request *req)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
        struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
        struct spacc_alg *alg = to_spacc_alg(tfm->__crt_alg);

        return spacc_ablk_setup(req, alg->type, 0);
}

static inline int spacc_fifo_stat_empty(struct spacc_engine *engine)
{
        return readl(engine->regs + SPA_FIFO_STAT_REG_OFFSET) &
                SPA_FIFO_STAT_EMPTY;
}

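/*
 * Drain completed packets from the status FIFO, converting the hardware
 * status codes into errno values, and schedule the completion tasklet. Runs
 * from both the IRQ handler and the packet timeout timer.
 */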
static void spacc_process_done(struct spacc_engine *engine)
{
        struct spacc_req *req;
        unsigned long flags;

        spin_lock_irqsave(&engine->hw_lock, flags);

        while (!spacc_fifo_stat_empty(engine)) {
                req = list_first_entry(&engine->in_progress, struct spacc_req,
                                       list);
                list_move_tail(&req->list, &engine->completed);
                --engine->in_flight;

                /* POP the status register. */
                writel(~0, engine->regs + SPA_STAT_POP_REG_OFFSET);
                req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) &
                     SPA_STATUS_RES_CODE_MASK) >> SPA_STATUS_RES_CODE_OFFSET;

                /*
                 * Convert the SPAcc error status into the standard POSIX error
                 * codes.
                 */
                if (unlikely(req->result)) {
                        switch (req->result) {
                        case SPA_STATUS_ICV_FAIL:
                                req->result = -EBADMSG;
                                break;

                        case SPA_STATUS_MEMORY_ERROR:
                                dev_warn(engine->dev,
                                         "memory error triggered\n");
                                req->result = -EFAULT;
                                break;

                        case SPA_STATUS_BLOCK_ERROR:
                                dev_warn(engine->dev,
                                         "block error triggered\n");
                                req->result = -EIO;
                                break;
                        }
                }
        }

        tasklet_schedule(&engine->complete);

        spin_unlock_irqrestore(&engine->hw_lock, flags);
}

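/* IRQ handler: acknowledge the interrupt and reap completed requests. */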
static irqreturn_t spacc_spacc_irq(int irq, void *dev)
{
        struct spacc_engine *engine = (struct spacc_engine *)dev;
        u32 spacc_irq_stat = readl(engine->regs + SPA_IRQ_STAT_REG_OFFSET);

        writel(spacc_irq_stat, engine->regs + SPA_IRQ_STAT_REG_OFFSET);
        spacc_process_done(engine);

        return IRQ_HANDLED;
}

static void spacc_packet_timeout(struct timer_list *t)
{
        struct spacc_engine *engine = from_timer(engine, t, packet_timeout);

        spacc_process_done(engine);
}

static int spacc_req_submit(struct spacc_req *req)
{
        struct crypto_alg *alg = req->req->tfm->__crt_alg;

        if (CRYPTO_ALG_TYPE_AEAD == (CRYPTO_ALG_TYPE_MASK & alg->cra_flags))
                return spacc_aead_submit(req);
        else
                return spacc_ablk_submit(req);
}

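/*
 * Completion tasklet. Splice off the completed list and refill the hardware
 * under the lock, then run the completion callbacks with the lock released.
 */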
static void spacc_spacc_complete(unsigned long data)
{
        struct spacc_engine *engine = (struct spacc_engine *)data;
        struct spacc_req *req, *tmp;
        unsigned long flags;
        LIST_HEAD(completed);

        spin_lock_irqsave(&engine->hw_lock, flags);

        list_splice_init(&engine->completed, &completed);
        spacc_push(engine);
        if (engine->in_flight)
                mod_timer(&engine->packet_timeout, jiffies + PACKET_TIMEOUT);

        spin_unlock_irqrestore(&engine->hw_lock, flags);

        list_for_each_entry_safe(req, tmp, &completed, list) {
                list_del(&req->list);
                req->complete(req);
        }
}

#ifdef CONFIG_PM
static int spacc_suspend(struct device *dev)
{
        struct spacc_engine *engine = dev_get_drvdata(dev);

        /*
         * We only support standby mode. All we have to do is gate the clock to
         * the spacc. The hardware will preserve state until we turn it back
         * on again.
         */
        clk_disable(engine->clk);

        return 0;
}

static int spacc_resume(struct device *dev)
{
        struct spacc_engine *engine = dev_get_drvdata(dev);

        return clk_enable(engine->clk);
}

static const struct dev_pm_ops spacc_pm_ops = {
        .suspend        = spacc_suspend,
        .resume         = spacc_resume,
};
#endif /* CONFIG_PM */

static inline struct spacc_engine *spacc_dev_to_engine(struct device *dev)
{
        return dev ? dev_get_drvdata(dev) : NULL;
}

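/*
 * sysfs attribute for the STAT_CNT interrupt threshold. Values written are
 * clamped to [1, fifo_sz - 1] and programmed into the IRQ control register.
 */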
1221 static ssize_t spacc_stat_irq_thresh_show(struct device *dev,
1222                                           struct device_attribute *attr,
1223                                           char *buf)
1224 {
1225         struct spacc_engine *engine = spacc_dev_to_engine(dev);
1226
1227         return snprintf(buf, PAGE_SIZE, "%u\n", engine->stat_irq_thresh);
1228 }
1229
1230 static ssize_t spacc_stat_irq_thresh_store(struct device *dev,
1231                                            struct device_attribute *attr,
1232                                            const char *buf, size_t len)
1233 {
1234         struct spacc_engine *engine = spacc_dev_to_engine(dev);
1235         unsigned long thresh;
1236
1237         if (kstrtoul(buf, 0, &thresh))
1238                 return -EINVAL;
1239
1240         thresh = clamp(thresh, 1UL, engine->fifo_sz - 1);
1241
1242         engine->stat_irq_thresh = thresh;
1243         writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
1244                engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
1245
1246         return len;
1247 }
1248 static DEVICE_ATTR(stat_irq_thresh, 0644, spacc_stat_irq_thresh_show,
1249                    spacc_stat_irq_thresh_store);
1250
1251 static struct spacc_alg ipsec_engine_algs[] = {
1252         {
1253                 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_CBC,
1254                 .key_offs = 0,
1255                 .iv_offs = AES_MAX_KEY_SIZE,
1256                 .alg = {
1257                         .cra_name = "cbc(aes)",
1258                         .cra_driver_name = "cbc-aes-picoxcell",
1259                         .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1260                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1261                                      CRYPTO_ALG_KERN_DRIVER_ONLY |
1262                                      CRYPTO_ALG_ASYNC |
1263                                      CRYPTO_ALG_NEED_FALLBACK,
1264                         .cra_blocksize = AES_BLOCK_SIZE,
1265                         .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1266                         .cra_type = &crypto_ablkcipher_type,
1267                         .cra_module = THIS_MODULE,
1268                         .cra_ablkcipher = {
1269                                 .setkey = spacc_aes_setkey,
1270                                 .encrypt = spacc_ablk_encrypt,
1271                                 .decrypt = spacc_ablk_decrypt,
1272                                 .min_keysize = AES_MIN_KEY_SIZE,
1273                                 .max_keysize = AES_MAX_KEY_SIZE,
1274                                 .ivsize = AES_BLOCK_SIZE,
1275                         },
1276                         .cra_init = spacc_ablk_cra_init,
1277                         .cra_exit = spacc_ablk_cra_exit,
1278                 },
1279         },
1280         {
1281                 .key_offs = 0,
1282                 .iv_offs = AES_MAX_KEY_SIZE,
1283                 .ctrl_default = SPA_CTRL_CIPH_ALG_AES | SPA_CTRL_CIPH_MODE_ECB,
1284                 .alg = {
1285                         .cra_name = "ecb(aes)",
1286                         .cra_driver_name = "ecb-aes-picoxcell",
1287                         .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1288                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1289                                 CRYPTO_ALG_KERN_DRIVER_ONLY |
1290                                 CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK,
1291                         .cra_blocksize = AES_BLOCK_SIZE,
1292                         .cra_ctxsize = sizeof(struct spacc_ablk_ctx),
1293                         .cra_type = &crypto_ablkcipher_type,
1294                         .cra_module = THIS_MODULE,
1295                         .cra_ablkcipher = {
1296                                 .setkey = spacc_aes_setkey,
1297                                 .encrypt = spacc_ablk_encrypt,
1298                                 .decrypt = spacc_ablk_decrypt,
1299                                 .min_keysize = AES_MIN_KEY_SIZE,
1300                                 .max_keysize = AES_MAX_KEY_SIZE,
1301                         },
1302                         .cra_init = spacc_ablk_cra_init,
1303                         .cra_exit = spacc_ablk_cra_exit,
1304                 },
1305         },
1306         {
1307                 .key_offs = DES_BLOCK_SIZE,
1308                 .iv_offs = 0,
1309                 .ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
1310                 .alg = {
1311                         .cra_name = "cbc(des)",
1312                         .cra_driver_name = "cbc-des-picoxcell",
1313                         .cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
1314                         .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1315                                         CRYPTO_ALG_ASYNC |
1316                                         CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_CBC,
		.alg = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-des3-ede-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des3_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES | SPA_CTRL_CIPH_MODE_ECB,
		.alg = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-des3-ede-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_des3_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
};
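
/*
 * These transforms are consumed through the normal kernel crypto API; a
 * minimal sketch (error handling elided, driver name taken from the table
 * above):
 *
 *	struct crypto_skcipher *tfm =
 *		crypto_alloc_skcipher("cbc-des3-ede-picoxcell", 0, 0);
 *
 *	if (!IS_ERR(tfm))
 *		crypto_free_skcipher(tfm);
 */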

static struct spacc_aead ipsec_engine_aeads[] = {
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA |
				SPA_CTRL_HASH_MODE_HMAC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA256 |
				SPA_CTRL_HASH_MODE_HMAC,
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = 0,
		.iv_offs = AES_MAX_KEY_SIZE,
		.ctrl_default = SPA_CTRL_CIPH_ALG_AES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		/* 3DES runs on the DES cipher engine, as in the sibling entries */
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_SHA256 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
	{
		.key_offs = DES_BLOCK_SIZE,
		.iv_offs = 0,
		.ctrl_default = SPA_CTRL_CIPH_ALG_DES |
				SPA_CTRL_CIPH_MODE_CBC |
				SPA_CTRL_HASH_ALG_MD5 |
				SPA_CTRL_HASH_MODE_HMAC,
		.alg = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-picoxcell",
				.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
				.cra_flags = CRYPTO_ALG_ASYNC |
					     CRYPTO_ALG_NEED_FALLBACK |
					     CRYPTO_ALG_KERN_DRIVER_ONLY,
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct spacc_aead_ctx),
				.cra_module = THIS_MODULE,
			},
			.setkey = spacc_aead_setkey,
			.setauthsize = spacc_aead_setauthsize,
			.encrypt = spacc_aead_encrypt,
			.decrypt = spacc_aead_decrypt,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
			.init = spacc_aead_cra_init,
			.exit = spacc_aead_cra_exit,
		},
	},
};
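
/*
 * The AEAD transforms are reached the same way; a minimal sketch using the
 * generic authenc() template name from the table above:
 *
 *	struct crypto_aead *tfm =
 *		crypto_alloc_aead("authenc(hmac(sha1),cbc(aes))", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		crypto_aead_setauthsize(tfm, SHA1_DIGEST_SIZE);
 *		crypto_free_aead(tfm);
 *	}
 */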

static struct spacc_alg l2_engine_algs[] = {
	{
		.key_offs = 0,
		.iv_offs = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
		.ctrl_default = SPA_CTRL_CIPH_ALG_KASUMI |
				SPA_CTRL_CIPH_MODE_F8,
		.alg = {
			.cra_name = "f8(kasumi)",
			.cra_driver_name = "f8-kasumi-picoxcell",
			.cra_priority = SPACC_CRYPTO_ALG_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
					CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 8,
			.cra_ctxsize = sizeof(struct spacc_ablk_ctx),
			.cra_type = &crypto_ablkcipher_type,
			.cra_module = THIS_MODULE,
			.cra_ablkcipher = {
				.setkey = spacc_kasumi_f8_setkey,
				.encrypt = spacc_ablk_encrypt,
				.decrypt = spacc_ablk_decrypt,
				.min_keysize = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
				.max_keysize = SPACC_CRYPTO_KASUMI_F8_KEY_LEN,
				.ivsize = 8,
			},
			.cra_init = spacc_ablk_cra_init,
			.cra_exit = spacc_ablk_cra_exit,
		},
	},
};

#ifdef CONFIG_OF
static const struct of_device_id spacc_of_id_table[] = {
	{ .compatible = "picochip,spacc-ipsec" },
	{ .compatible = "picochip,spacc-l2" },
	{}
};
MODULE_DEVICE_TABLE(of, spacc_of_id_table);
#endif /* CONFIG_OF */

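/*
 * Example device tree node (a sketch; the unit address, register window,
 * interrupt number and clock phandle are board specific and shown only for
 * illustration, but the compatible string and the "ref" clock name match
 * what the driver looks up):
 *
 *	spacc@10000 {
 *		compatible = "picochip,spacc-ipsec";
 *		reg = <0x10000 0x10000>;
 *		interrupts = <24>;
 *		clocks = <&clkgate 3>;
 *		clock-names = "ref";
 *	};
 */
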
static int spacc_probe(struct platform_device *pdev)
{
	int i, err, ret;
	struct resource *mem, *irq;
	struct device_node *np = pdev->dev.of_node;
	struct spacc_engine *engine = devm_kzalloc(&pdev->dev, sizeof(*engine),
						   GFP_KERNEL);
	if (!engine)
		return -ENOMEM;

	if (of_device_is_compatible(np, "picochip,spacc-ipsec")) {
		engine->max_ctxs	= SPACC_CRYPTO_IPSEC_MAX_CTXS;
		engine->cipher_pg_sz	= SPACC_CRYPTO_IPSEC_CIPHER_PG_SZ;
		engine->hash_pg_sz	= SPACC_CRYPTO_IPSEC_HASH_PG_SZ;
		engine->fifo_sz		= SPACC_CRYPTO_IPSEC_FIFO_SZ;
		engine->algs		= ipsec_engine_algs;
		engine->num_algs	= ARRAY_SIZE(ipsec_engine_algs);
		engine->aeads		= ipsec_engine_aeads;
		engine->num_aeads	= ARRAY_SIZE(ipsec_engine_aeads);
	} else if (of_device_is_compatible(np, "picochip,spacc-l2")) {
		engine->max_ctxs	= SPACC_CRYPTO_L2_MAX_CTXS;
		engine->cipher_pg_sz	= SPACC_CRYPTO_L2_CIPHER_PG_SZ;
		engine->hash_pg_sz	= SPACC_CRYPTO_L2_HASH_PG_SZ;
		engine->fifo_sz		= SPACC_CRYPTO_L2_FIFO_SZ;
		engine->algs		= l2_engine_algs;
		engine->num_algs	= ARRAY_SIZE(l2_engine_algs);
	} else {
		return -EINVAL;
	}

	engine->name = dev_name(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	engine->regs = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(engine->regs))
		return PTR_ERR(engine->regs);

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no IRQ resource for engine\n");
		return -ENXIO;
	}

	if (devm_request_irq(&pdev->dev, irq->start, spacc_spacc_irq, 0,
			     engine->name, engine)) {
		/* engine->dev is not initialized yet; use the platform device */
		dev_err(&pdev->dev, "failed to request IRQ\n");
		return -EBUSY;
	}

	engine->dev		= &pdev->dev;
	engine->cipher_ctx_base = engine->regs + SPA_CIPH_KEY_BASE_REG_OFFSET;
	engine->hash_key_base	= engine->regs + SPA_HASH_KEY_BASE_REG_OFFSET;

	engine->req_pool = dmam_pool_create(engine->name, engine->dev,
		MAX_DDT_LEN * sizeof(struct spacc_ddt), 8, SZ_64K);
	if (!engine->req_pool)
		return -ENOMEM;

	spin_lock_init(&engine->hw_lock);

	engine->clk = clk_get(&pdev->dev, "ref");
	if (IS_ERR(engine->clk)) {
		dev_info(&pdev->dev, "clk unavailable\n");
		return PTR_ERR(engine->clk);
	}

	if (clk_prepare_enable(engine->clk)) {
		dev_info(&pdev->dev, "unable to prepare/enable clk\n");
		ret = -EIO;
		goto err_clk_put;
	}

	ret = device_create_file(&pdev->dev, &dev_attr_stat_irq_thresh);
	if (ret)
		goto err_clk_disable;

	/*
	 * Use an IRQ threshold of 50% as a default. This seems to be a
	 * reasonable trade off of latency against throughput but can be
	 * changed at runtime.
	 */
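	/* e.g. 32 / 2 = 16 for the IPSEC FIFO, 128 / 2 = 64 for the L2 FIFO */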
	engine->stat_irq_thresh = (engine->fifo_sz / 2);

	/*
	 * Configure the interrupts. We only use the STAT_CNT interrupt as we
	 * only submit a new packet for processing when we complete another in
	 * the queue. This minimizes time spent in the interrupt handler.
	 */
	writel(engine->stat_irq_thresh << SPA_IRQ_CTRL_STAT_CNT_OFFSET,
	       engine->regs + SPA_IRQ_CTRL_REG_OFFSET);
	writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN,
	       engine->regs + SPA_IRQ_EN_REG_OFFSET);

	timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0);

	INIT_LIST_HEAD(&engine->pending);
	INIT_LIST_HEAD(&engine->completed);
	INIT_LIST_HEAD(&engine->in_progress);
	engine->in_flight = 0;
	tasklet_init(&engine->complete, spacc_spacc_complete,
		     (unsigned long)engine);

	platform_set_drvdata(pdev, engine);

	ret = -EINVAL;
	INIT_LIST_HEAD(&engine->registered_algs);
	for (i = 0; i < engine->num_algs; ++i) {
		engine->algs[i].engine = engine;
		err = crypto_register_alg(&engine->algs[i].alg);
		if (err) {
			dev_err(engine->dev, "failed to register alg \"%s\"\n",
				engine->algs[i].alg.cra_name);
		} else {
			list_add_tail(&engine->algs[i].entry,
				      &engine->registered_algs);
			ret = 0;
			dev_dbg(engine->dev, "registered alg \"%s\"\n",
				engine->algs[i].alg.cra_name);
		}
	}

	INIT_LIST_HEAD(&engine->registered_aeads);
	for (i = 0; i < engine->num_aeads; ++i) {
		engine->aeads[i].engine = engine;
		err = crypto_register_aead(&engine->aeads[i].alg);
		if (err) {
			dev_err(engine->dev, "failed to register alg \"%s\"\n",
				engine->aeads[i].alg.base.cra_name);
		} else {
			list_add_tail(&engine->aeads[i].entry,
				      &engine->registered_aeads);
			ret = 0;
			dev_dbg(engine->dev, "registered alg \"%s\"\n",
				engine->aeads[i].alg.base.cra_name);
		}
	}

	if (!ret)
		return 0;

	del_timer_sync(&engine->packet_timeout);
	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);
err_clk_disable:
	clk_disable_unprepare(engine->clk);
err_clk_put:
	clk_put(engine->clk);

	return ret;
}

static int spacc_remove(struct platform_device *pdev)
{
	struct spacc_aead *aead, *an;
	struct spacc_alg *alg, *next;
	struct spacc_engine *engine = platform_get_drvdata(pdev);

	del_timer_sync(&engine->packet_timeout);
	device_remove_file(&pdev->dev, &dev_attr_stat_irq_thresh);

	list_for_each_entry_safe(aead, an, &engine->registered_aeads, entry) {
		list_del(&aead->entry);
		crypto_unregister_aead(&aead->alg);
	}

	list_for_each_entry_safe(alg, next, &engine->registered_algs, entry) {
		list_del(&alg->entry);
		crypto_unregister_alg(&alg->alg);
	}

	clk_disable_unprepare(engine->clk);
	clk_put(engine->clk);

	return 0;
}

static struct platform_driver spacc_driver = {
	.probe		= spacc_probe,
	.remove		= spacc_remove,
	.driver		= {
		.name	= "picochip,spacc",
#ifdef CONFIG_PM
		.pm	= &spacc_pm_ops,
#endif /* CONFIG_PM */
		.of_match_table = of_match_ptr(spacc_of_id_table),
	},
};

module_platform_driver(spacc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
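
/*
 * Once an engine has probed, successful registration can be checked from
 * userspace via /proc/crypto; illustrative output (names taken from the
 * tables above, exact field padding may differ):
 *
 *	$ grep -B1 picoxcell /proc/crypto
 *	name         : cbc(des3_ede)
 *	driver       : cbc-des3-ede-picoxcell
 */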