1 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
2  *
3  * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
4  */
5
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/cpumask.h>
13 #include <linux/slab.h>
14 #include <linux/interrupt.h>
15 #include <linux/crypto.h>
16 #include <crypto/md5.h>
17 #include <crypto/sha.h>
18 #include <crypto/aes.h>
19 #include <crypto/des.h>
20 #include <linux/mutex.h>
21 #include <linux/delay.h>
22 #include <linux/sched.h>
23
24 #include <crypto/internal/hash.h>
25 #include <crypto/scatterwalk.h>
26 #include <crypto/algapi.h>
27
28 #include <asm/hypervisor.h>
29 #include <asm/mdesc.h>
30
31 #include "n2_core.h"
32
33 #define DRV_MODULE_NAME         "n2_crypto"
34 #define DRV_MODULE_VERSION      "0.2"
35 #define DRV_MODULE_RELDATE      "July 28, 2011"
36
37 static const char version[] =
38         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
39
40 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
41 MODULE_DESCRIPTION("Niagara2 Crypto driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(DRV_MODULE_VERSION);
44
45 #define N2_CRA_PRIORITY         200
46
47 static DEFINE_MUTEX(spu_lock);
48
49 struct spu_queue {
50         cpumask_t               sharing;
51         unsigned long           qhandle;
52
53         spinlock_t              lock;
54         u8                      q_type;
55         void                    *q;
56         unsigned long           head;
57         unsigned long           tail;
58         struct list_head        jobs;
59
60         unsigned long           devino;
61
62         char                    irq_name[32];
63         unsigned int            irq;
64
65         struct list_head        list;
66 };
67
68 struct spu_qreg {
69         struct spu_queue        *queue;
70         unsigned long           type;
71 };
72
73 static struct spu_queue **cpu_to_cwq;
74 static struct spu_queue **cpu_to_mau;
75
76 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
77 {
78         if (q->q_type == HV_NCS_QTYPE_MAU) {
79                 off += MAU_ENTRY_SIZE;
80                 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
81                         off = 0;
82         } else {
83                 off += CWQ_ENTRY_SIZE;
84                 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
85                         off = 0;
86         }
87         return off;
88 }
89
90 struct n2_request_common {
91         struct list_head        entry;
92         unsigned int            offset;
93 };
94 #define OFFSET_NOT_RUNNING      (~(unsigned int)0)
95
96 /* An async job request records the final tail value it used in
97  * n2_request_common->offset; test whether that offset falls in the
98  * half-open range (old_head, new_head] to see if the job has finished.
99  */
100 static inline bool job_finished(struct spu_queue *q, unsigned int offset,
101                                 unsigned long old_head, unsigned long new_head)
102 {
103         if (old_head <= new_head) {
104                 if (offset > old_head && offset <= new_head)
105                         return true;
106         } else {
107                 if (offset > old_head || offset <= new_head)
108                         return true;
109         }
110         return false;
111 }
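
/* Illustrative sketch, not part of the original driver: a completion scan
 * that the stubbed-out loop in cwq_intr() below might eventually perform.
 * It assumes pending requests are kept on q->jobs as n2_request_common
 * entries and that q->lock is held; the helper name and the completion
 * step are hypothetical.
 */
#if 0
static void n2_reap_finished_jobs(struct spu_queue *q, unsigned long old_head,
				  unsigned long new_head)
{
	struct n2_request_common *req, *tmp;

	list_for_each_entry_safe(req, tmp, &q->jobs, entry) {
		if (req->offset == OFFSET_NOT_RUNNING)
			continue;
		if (job_finished(q, req->offset, old_head, new_head)) {
			req->offset = OFFSET_NOT_RUNNING;
			list_del(&req->entry);
			/* ... complete the corresponding crypto request ... */
		}
	}
}
#endif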
112
113 /* When the HEAD marker is unequal to the actual HEAD, we get
114  * a virtual device INO interrupt.  We should process the
115  * completed CWQ entries and adjust the HEAD marker to clear
116  * the IRQ.
117  */
118 static irqreturn_t cwq_intr(int irq, void *dev_id)
119 {
120         unsigned long off, new_head, hv_ret;
121         struct spu_queue *q = dev_id;
122
123         pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
124                smp_processor_id(), q->qhandle);
125
126         spin_lock(&q->lock);
127
128         hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
129
130         pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
131                smp_processor_id(), new_head, hv_ret);
132
133         for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
134                 /* XXX ... XXX */
135         }
136
137         hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
138         if (hv_ret == HV_EOK)
139                 q->head = new_head;
140
141         spin_unlock(&q->lock);
142
143         return IRQ_HANDLED;
144 }
145
146 static irqreturn_t mau_intr(int irq, void *dev_id)
147 {
148         struct spu_queue *q = dev_id;
149         unsigned long head, hv_ret;
150
151         spin_lock(&q->lock);
152
153         pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
154                smp_processor_id(), q->qhandle);
155
156         hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
157
158         pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
159                smp_processor_id(), head, hv_ret);
160
161         sun4v_ncs_sethead_marker(q->qhandle, head);
162
163         spin_unlock(&q->lock);
164
165         return IRQ_HANDLED;
166 }
167
168 static void *spu_queue_next(struct spu_queue *q, void *cur)
169 {
170         return q->q + spu_next_offset(q, cur - q->q);
171 }
172
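/* Free-space accounting for the hardware ring: when the queue has wrapped
 * (tail ahead of head) the free region is split across the end of the ring.
 * One entry's worth (the "- 1" below) is always kept unused so that
 * head == tail unambiguously means "empty" rather than "full".
 */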
173 static int spu_queue_num_free(struct spu_queue *q)
174 {
175         unsigned long head = q->head;
176         unsigned long tail = q->tail;
177         unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
178         unsigned long diff;
179
180         if (head > tail)
181                 diff = head - tail;
182         else
183                 diff = (end - tail) + head;
184
185         return (diff / CWQ_ENTRY_SIZE) - 1;
186 }
187
188 static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
189 {
190         int avail = spu_queue_num_free(q);
191
192         if (avail >= num_entries)
193                 return q->q + q->tail;
194
195         return NULL;
196 }
197
198 static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
199 {
200         unsigned long hv_ret, new_tail;
201
202         new_tail = spu_next_offset(q, last - q->q);
203
204         hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
205         if (hv_ret == HV_EOK)
206                 q->tail = new_tail;
207         return hv_ret;
208 }
209
210 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
211                              int enc_type, int auth_type,
212                              unsigned int hash_len,
213                              bool sfas, bool sob, bool eob, bool encrypt,
214                              int opcode)
215 {
216         u64 word = (len - 1) & CONTROL_LEN;
217
218         word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
219         word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
220         word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
221         if (sfas)
222                 word |= CONTROL_STORE_FINAL_AUTH_STATE;
223         if (sob)
224                 word |= CONTROL_START_OF_BLOCK;
225         if (eob)
226                 word |= CONTROL_END_OF_BLOCK;
227         if (encrypt)
228                 word |= CONTROL_ENCRYPT;
229         if (hmac_key_len)
230                 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
231         if (hash_len)
232                 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
233
234         return word;
235 }
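
/* Example of how a control word is assembled (this mirrors the first
 * descriptor set up in n2_do_async_digest() below); a sketch only, with
 * SHA1 picked arbitrarily:
 *
 *	ent->control = control_word_base(nbytes, 0, 0,
 *					 AUTH_TYPE_SHA1, SHA1_DIGEST_SIZE,
 *					 false, true, false, false,
 *					 OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 *
 * i.e. start-of-block set, no HMAC key, no encryption; CONTROL_END_OF_BLOCK
 * is OR-ed in separately once the final descriptor is known.
 */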
236
237 #if 0
238 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
239 {
240         if (this_len >= 64 ||
241             qp->head != qp->tail)
242                 return true;
243         return false;
244 }
245 #endif
246
247 struct n2_ahash_alg {
248         struct list_head        entry;
249         const u8                *hash_zero;
250         const u32               *hash_init;
251         u8                      hw_op_hashsz;
252         u8                      digest_size;
253         u8                      auth_type;
254         u8                      hmac_type;
255         struct ahash_alg        alg;
256 };
257
258 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
259 {
260         struct crypto_alg *alg = tfm->__crt_alg;
261         struct ahash_alg *ahash_alg;
262
263         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
264
265         return container_of(ahash_alg, struct n2_ahash_alg, alg);
266 }
267
268 struct n2_hmac_alg {
269         const char              *child_alg;
270         struct n2_ahash_alg     derived;
271 };
272
273 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
274 {
275         struct crypto_alg *alg = tfm->__crt_alg;
276         struct ahash_alg *ahash_alg;
277
278         ahash_alg = container_of(alg, struct ahash_alg, halg.base);
279
280         return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
281 }
282
283 struct n2_hash_ctx {
284         struct crypto_ahash             *fallback_tfm;
285 };
286
287 #define N2_HASH_KEY_MAX                 32 /* HW limit for all HMAC requests */
288
289 struct n2_hmac_ctx {
290         struct n2_hash_ctx              base;
291
292         struct crypto_shash             *child_shash;
293
294         int                             hash_key_len;
295         unsigned char                   hash_key[N2_HASH_KEY_MAX];
296 };
297
298 struct n2_hash_req_ctx {
299         union {
300                 struct md5_state        md5;
301                 struct sha1_state       sha1;
302                 struct sha256_state     sha256;
303         } u;
304
305         struct ahash_request            fallback_req;
306 };
307
308 static int n2_hash_async_init(struct ahash_request *req)
309 {
310         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
311         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
312         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
313
314         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
315         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
316
317         return crypto_ahash_init(&rctx->fallback_req);
318 }
319
320 static int n2_hash_async_update(struct ahash_request *req)
321 {
322         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
323         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
324         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
325
326         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
327         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
328         rctx->fallback_req.nbytes = req->nbytes;
329         rctx->fallback_req.src = req->src;
330
331         return crypto_ahash_update(&rctx->fallback_req);
332 }
333
334 static int n2_hash_async_final(struct ahash_request *req)
335 {
336         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
337         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
338         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
339
340         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
341         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
342         rctx->fallback_req.result = req->result;
343
344         return crypto_ahash_final(&rctx->fallback_req);
345 }
346
347 static int n2_hash_async_finup(struct ahash_request *req)
348 {
349         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
350         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
351         struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
352
353         ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
354         rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
355         rctx->fallback_req.nbytes = req->nbytes;
356         rctx->fallback_req.src = req->src;
357         rctx->fallback_req.result = req->result;
358
359         return crypto_ahash_finup(&rctx->fallback_req);
360 }
361
362 static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
363 {
364         return -ENOSYS;
365 }
366
367 static int n2_hash_async_noexport(struct ahash_request *req, void *out)
368 {
369         return -ENOSYS;
370 }
371
372 static int n2_hash_cra_init(struct crypto_tfm *tfm)
373 {
374         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
375         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
376         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
377         struct crypto_ahash *fallback_tfm;
378         int err;
379
380         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
381                                           CRYPTO_ALG_NEED_FALLBACK);
382         if (IS_ERR(fallback_tfm)) {
383                 pr_warning("Fallback driver '%s' could not be loaded!\n",
384                            fallback_driver_name);
385                 err = PTR_ERR(fallback_tfm);
386                 goto out;
387         }
388
389         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
390                                          crypto_ahash_reqsize(fallback_tfm)));
391
392         ctx->fallback_tfm = fallback_tfm;
393         return 0;
394
395 out:
396         return err;
397 }
398
399 static void n2_hash_cra_exit(struct crypto_tfm *tfm)
400 {
401         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
402         struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
403
404         crypto_free_ahash(ctx->fallback_tfm);
405 }
406
407 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
408 {
409         const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
410         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
411         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
412         struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
413         struct crypto_ahash *fallback_tfm;
414         struct crypto_shash *child_shash;
415         int err;
416
417         fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
418                                           CRYPTO_ALG_NEED_FALLBACK);
419         if (IS_ERR(fallback_tfm)) {
420                 pr_warning("Fallback driver '%s' could not be loaded!\n",
421                            fallback_driver_name);
422                 err = PTR_ERR(fallback_tfm);
423                 goto out;
424         }
425
426         child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
427         if (IS_ERR(child_shash)) {
428                 pr_warning("Child shash '%s' could not be loaded!\n",
429                            n2alg->child_alg);
430                 err = PTR_ERR(child_shash);
431                 goto out_free_fallback;
432         }
433
434         crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
435                                          crypto_ahash_reqsize(fallback_tfm)));
436
437         ctx->child_shash = child_shash;
438         ctx->base.fallback_tfm = fallback_tfm;
439         return 0;
440
441 out_free_fallback:
442         crypto_free_ahash(fallback_tfm);
443
444 out:
445         return err;
446 }
447
448 static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
449 {
450         struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
451         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
452
453         crypto_free_ahash(ctx->base.fallback_tfm);
454         crypto_free_shash(ctx->child_shash);
455 }
456
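/* As with generic HMAC (RFC 2104), a key longer than the underlying hash's
 * block size is first digested down to the digest size before being cached
 * in ctx->hash_key for the hardware; shorter keys are stored as-is.
 */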
457 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
458                                 unsigned int keylen)
459 {
460         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
461         struct crypto_shash *child_shash = ctx->child_shash;
462         struct crypto_ahash *fallback_tfm;
463         SHASH_DESC_ON_STACK(shash, child_shash);
464         int err, bs, ds;
465
466         fallback_tfm = ctx->base.fallback_tfm;
467         err = crypto_ahash_setkey(fallback_tfm, key, keylen);
468         if (err)
469                 return err;
470
471         shash->tfm = child_shash;
472
473         bs = crypto_shash_blocksize(child_shash);
474         ds = crypto_shash_digestsize(child_shash);
475         BUG_ON(ds > N2_HASH_KEY_MAX);
476         if (keylen > bs) {
477                 err = crypto_shash_digest(shash, key, keylen,
478                                           ctx->hash_key);
479                 if (err)
480                         return err;
481                 keylen = ds;
482         } else if (keylen <= N2_HASH_KEY_MAX)
483                 memcpy(ctx->hash_key, key, keylen);
484
485         ctx->hash_key_len = keylen;
486
487         return err;
488 }
489
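/* Submissions in this driver are fully synchronous: wait_for_tail() simply
 * polls the hardware HEAD pointer until it catches up with the TAIL we last
 * programmed, i.e. until every queued entry has been consumed.
 */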
490 static unsigned long wait_for_tail(struct spu_queue *qp)
491 {
492         unsigned long head, hv_ret;
493
494         do {
495                 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
496                 if (hv_ret != HV_EOK) {
497                         pr_err("Hypervisor error on gethead\n");
498                         break;
499                 }
500                 if (head == qp->tail) {
501                         qp->head = head;
502                         break;
503                 }
504         } while (1);
505         return hv_ret;
506 }
507
508 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
509                                               struct cwq_initial_entry *ent)
510 {
511         unsigned long hv_ret = spu_queue_submit(qp, ent);
512
513         if (hv_ret == HV_EOK)
514                 hv_ret = wait_for_tail(qp);
515
516         return hv_ret;
517 }
518
519 static int n2_do_async_digest(struct ahash_request *req,
520                               unsigned int auth_type, unsigned int digest_size,
521                               unsigned int result_size, void *hash_loc,
522                               unsigned long auth_key, unsigned int auth_key_len)
523 {
524         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
525         struct cwq_initial_entry *ent;
526         struct crypto_hash_walk walk;
527         struct spu_queue *qp;
528         unsigned long flags;
529         int err = -ENODEV;
530         int nbytes, cpu;
531
532         /* The total effective length of the operation may not
533          * exceed 2^16.
534          */
535         if (unlikely(req->nbytes > (1 << 16))) {
536                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
537                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
538
539                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
540                 rctx->fallback_req.base.flags =
541                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
542                 rctx->fallback_req.nbytes = req->nbytes;
543                 rctx->fallback_req.src = req->src;
544                 rctx->fallback_req.result = req->result;
545
546                 return crypto_ahash_digest(&rctx->fallback_req);
547         }
548
549         nbytes = crypto_hash_walk_first(req, &walk);
550
551         cpu = get_cpu();
552         qp = cpu_to_cwq[cpu];
553         if (!qp)
554                 goto out;
555
556         spin_lock_irqsave(&qp->lock, flags);
557
558         /* XXX can do better, improve this later by doing a by-hand scatterlist
559          * XXX walk, etc.
560          */
561         ent = qp->q + qp->tail;
562
563         ent->control = control_word_base(nbytes, auth_key_len, 0,
564                                          auth_type, digest_size,
565                                          false, true, false, false,
566                                          OPCODE_INPLACE_BIT |
567                                          OPCODE_AUTH_MAC);
568         ent->src_addr = __pa(walk.data);
569         ent->auth_key_addr = auth_key;
570         ent->auth_iv_addr = __pa(hash_loc);
571         ent->final_auth_state_addr = 0UL;
572         ent->enc_key_addr = 0UL;
573         ent->enc_iv_addr = 0UL;
574         ent->dest_addr = __pa(hash_loc);
575
576         nbytes = crypto_hash_walk_done(&walk, 0);
577         while (nbytes > 0) {
578                 ent = spu_queue_next(qp, ent);
579
580                 ent->control = (nbytes - 1);
581                 ent->src_addr = __pa(walk.data);
582                 ent->auth_key_addr = 0UL;
583                 ent->auth_iv_addr = 0UL;
584                 ent->final_auth_state_addr = 0UL;
585                 ent->enc_key_addr = 0UL;
586                 ent->enc_iv_addr = 0UL;
587                 ent->dest_addr = 0UL;
588
589                 nbytes = crypto_hash_walk_done(&walk, 0);
590         }
591         ent->control |= CONTROL_END_OF_BLOCK;
592
593         if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
594                 err = -EINVAL;
595         else
596                 err = 0;
597
598         spin_unlock_irqrestore(&qp->lock, flags);
599
600         if (!err)
601                 memcpy(req->result, hash_loc, result_size);
602 out:
603         put_cpu();
604
605         return err;
606 }
607
608 static int n2_hash_async_digest(struct ahash_request *req)
609 {
610         struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
611         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
612         int ds;
613
614         ds = n2alg->digest_size;
615         if (unlikely(req->nbytes == 0)) {
616                 memcpy(req->result, n2alg->hash_zero, ds);
617                 return 0;
618         }
619         memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
620
621         return n2_do_async_digest(req, n2alg->auth_type,
622                                   n2alg->hw_op_hashsz, ds,
623                                   &rctx->u, 0UL, 0);
624 }
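
/* A minimal sketch (not part of this driver) of how a kernel user would
 * exercise the ahash algorithms registered below through the generic
 * crypto API; the function name is hypothetical and error handling is
 * abbreviated.  The data buffer must be in linear (e.g. kmalloc'ed) memory.
 * With this driver loaded, its higher cra_priority means a request for
 * "md5" would typically resolve to n2_hash_async_digest() above.
 */
#if 0
static int n2_example_md5_digest(const void *data, unsigned int len,
				 u8 out[MD5_DIGEST_SIZE])
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("md5", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
#endif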
625
626 static int n2_hmac_async_digest(struct ahash_request *req)
627 {
628         struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
629         struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
630         struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
631         struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
632         int ds;
633
634         ds = n2alg->derived.digest_size;
635         if (unlikely(req->nbytes == 0) ||
636             unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
637                 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
638                 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
639
640                 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
641                 rctx->fallback_req.base.flags =
642                         req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
643                 rctx->fallback_req.nbytes = req->nbytes;
644                 rctx->fallback_req.src = req->src;
645                 rctx->fallback_req.result = req->result;
646
647                 return crypto_ahash_digest(&rctx->fallback_req);
648         }
649         memcpy(&rctx->u, n2alg->derived.hash_init,
650                n2alg->derived.hw_op_hashsz);
651
652         return n2_do_async_digest(req, n2alg->derived.hmac_type,
653                                   n2alg->derived.hw_op_hashsz, ds,
654                                   &rctx->u,
655                                   __pa(&ctx->hash_key),
656                                   ctx->hash_key_len);
657 }
658
659 struct n2_cipher_context {
660         int                     key_len;
661         int                     enc_type;
662         union {
663                 u8              aes[AES_MAX_KEY_SIZE];
664                 u8              des[DES_KEY_SIZE];
665                 u8              des3[3 * DES_KEY_SIZE];
666                 u8              arc4[258]; /* S-box, X, Y */
667         } key;
668 };
669
670 #define N2_CHUNK_ARR_LEN        16
671
672 struct n2_crypto_chunk {
673         struct list_head        entry;
674         unsigned long           iv_paddr : 44;
675         unsigned long           arr_len : 20;
676         unsigned long           dest_paddr;
677         unsigned long           dest_final;
678         struct {
679                 unsigned long   src_paddr : 44;
680                 unsigned long   src_len : 20;
681         } arr[N2_CHUNK_ARR_LEN];
682 };
683
684 struct n2_request_context {
685         struct ablkcipher_walk  walk;
686         struct list_head        chunk_list;
687         struct n2_crypto_chunk  chunk;
688         u8                      temp_iv[16];
689 };
690
691 /* The SPU allows some level of flexibility for partial cipher blocks
692  * being specified in a descriptor.
693  *
694  * It merely requires that every descriptor's length field is at least
695  * as large as the cipher block size.  This means that a cipher block
696  * can span at most 2 descriptors.  However, this does not allow a
697  * partial block to span into the final descriptor as that would
698  * violate the rule (since every descriptor's length must be at least
699  * the block size).  So, for example, assuming an 8 byte block size:
700  *
701  *      0xe --> 0xa --> 0x8
702  *
703  * is a valid length sequence, whereas:
704  *
705  *      0xe --> 0xb --> 0x7
706  *
707  * is not a valid sequence.
708  */
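
/* Illustrative check of the rule above, not used by the driver: every
 * proposed descriptor length must be at least one cipher block, and (for a
 * block cipher request) the total must be block aligned.  With an 8-byte
 * block, { 0xe, 0xa, 0x8 } passes while { 0xe, 0xb, 0x7 } fails on its
 * final entry.
 */
#if 0
static bool n2_descriptor_lengths_valid(const unsigned int *lens, int n,
					unsigned int block_size)
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (lens[i] < block_size)
			return false;
		total += lens[i];
	}
	return (total & (block_size - 1)) == 0;
}
#endif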
709
710 struct n2_cipher_alg {
711         struct list_head        entry;
712         u8                      enc_type;
713         struct crypto_alg       alg;
714 };
715
716 static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
717 {
718         struct crypto_alg *alg = tfm->__crt_alg;
719
720         return container_of(alg, struct n2_cipher_alg, alg);
721 }
722
723 struct n2_cipher_request_context {
724         struct ablkcipher_walk  walk;
725 };
726
727 static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
728                          unsigned int keylen)
729 {
730         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
731         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
732         struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
733
734         ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
735
736         switch (keylen) {
737         case AES_KEYSIZE_128:
738                 ctx->enc_type |= ENC_TYPE_ALG_AES128;
739                 break;
740         case AES_KEYSIZE_192:
741                 ctx->enc_type |= ENC_TYPE_ALG_AES192;
742                 break;
743         case AES_KEYSIZE_256:
744                 ctx->enc_type |= ENC_TYPE_ALG_AES256;
745                 break;
746         default:
747                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
748                 return -EINVAL;
749         }
750
751         ctx->key_len = keylen;
752         memcpy(ctx->key.aes, key, keylen);
753         return 0;
754 }
755
756 static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
757                          unsigned int keylen)
758 {
759         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
760         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
761         struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
762         u32 tmp[DES_EXPKEY_WORDS];
763         int err;
764
765         ctx->enc_type = n2alg->enc_type;
766
767         if (keylen != DES_KEY_SIZE) {
768                 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
769                 return -EINVAL;
770         }
771
772         err = des_ekey(tmp, key);
773         if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
774                 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
775                 return -EINVAL;
776         }
777
778         ctx->key_len = keylen;
779         memcpy(ctx->key.des, key, keylen);
780         return 0;
781 }
782
783 static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
784                           unsigned int keylen)
785 {
786         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
787         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
788         struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
789         u32 flags;
790         int err;
791
792         flags = crypto_ablkcipher_get_flags(cipher);
793         err = __des3_verify_key(&flags, key);
794         if (unlikely(err)) {
795                 crypto_ablkcipher_set_flags(cipher, flags);
796                 return err;
797         }
798
799         ctx->enc_type = n2alg->enc_type;
800
801         ctx->key_len = keylen;
802         memcpy(ctx->key.des3, key, keylen);
803         return 0;
804 }
805
806 static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
807                           unsigned int keylen)
808 {
809         struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
810         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
811         struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
812         u8 *s = ctx->key.arc4;
813         u8 *x = s + 256;
814         u8 *y = x + 1;
815         int i, j, k;
816
817         ctx->enc_type = n2alg->enc_type;
818
819         j = k = 0;
820         *x = 0;
821         *y = 0;
822         for (i = 0; i < 256; i++)
823                 s[i] = i;
824         for (i = 0; i < 256; i++) {
825                 u8 a = s[i];
826                 j = (j + key[k] + a) & 0xff;
827                 s[i] = s[j];
828                 s[j] = a;
829                 if (++k >= keylen)
830                         k = 0;
831         }
832
833         return 0;
834 }
835
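/* Trim a descriptor to whole cipher blocks and the 64K hardware limit.  For
 * example, with a 16-byte block size, 0x1234 bytes become 0x1230 (the
 * partial block is left for a later walk step), and block-aligned lengths
 * above 1 << 16 are capped at 1 << 16.
 */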
836 static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
837 {
838         int this_len = nbytes;
839
840         this_len -= (nbytes & (block_size - 1));
841         return this_len > (1 << 16) ? (1 << 16) : this_len;
842 }
843
844 static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
845                             struct spu_queue *qp, bool encrypt)
846 {
847         struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
848         struct cwq_initial_entry *ent;
849         bool in_place;
850         int i;
851
852         ent = spu_queue_alloc(qp, cp->arr_len);
853         if (!ent) {
854                 pr_info("queue_alloc() of %d fails\n",
855                         cp->arr_len);
856                 return -EBUSY;
857         }
858
859         in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
860
861         ent->control = control_word_base(cp->arr[0].src_len,
862                                          0, ctx->enc_type, 0, 0,
863                                          false, true, false, encrypt,
864                                          OPCODE_ENCRYPT |
865                                          (in_place ? OPCODE_INPLACE_BIT : 0));
866         ent->src_addr = cp->arr[0].src_paddr;
867         ent->auth_key_addr = 0UL;
868         ent->auth_iv_addr = 0UL;
869         ent->final_auth_state_addr = 0UL;
870         ent->enc_key_addr = __pa(&ctx->key);
871         ent->enc_iv_addr = cp->iv_paddr;
872         ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
873
874         for (i = 1; i < cp->arr_len; i++) {
875                 ent = spu_queue_next(qp, ent);
876
877                 ent->control = cp->arr[i].src_len - 1;
878                 ent->src_addr = cp->arr[i].src_paddr;
879                 ent->auth_key_addr = 0UL;
880                 ent->auth_iv_addr = 0UL;
881                 ent->final_auth_state_addr = 0UL;
882                 ent->enc_key_addr = 0UL;
883                 ent->enc_iv_addr = 0UL;
884                 ent->dest_addr = 0UL;
885         }
886         ent->control |= CONTROL_END_OF_BLOCK;
887
888         return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
889 }
890
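/* Walk the request and group the physical segments into n2_crypto_chunk
 * descriptors.  A new chunk is started whenever the in-place property flips,
 * an out-of-place destination stops being contiguous, N2_CHUNK_ARR_LEN
 * entries have been collected, or the running length would exceed 64K.
 */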
891 static int n2_compute_chunks(struct ablkcipher_request *req)
892 {
893         struct n2_request_context *rctx = ablkcipher_request_ctx(req);
894         struct ablkcipher_walk *walk = &rctx->walk;
895         struct n2_crypto_chunk *chunk;
896         unsigned long dest_prev;
897         unsigned int tot_len;
898         bool prev_in_place;
899         int err, nbytes;
900
901         ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
902         err = ablkcipher_walk_phys(req, walk);
903         if (err)
904                 return err;
905
906         INIT_LIST_HEAD(&rctx->chunk_list);
907
908         chunk = &rctx->chunk;
909         INIT_LIST_HEAD(&chunk->entry);
910
911         chunk->iv_paddr = 0UL;
912         chunk->arr_len = 0;
913         chunk->dest_paddr = 0UL;
914
915         prev_in_place = false;
916         dest_prev = ~0UL;
917         tot_len = 0;
918
919         while ((nbytes = walk->nbytes) != 0) {
920                 unsigned long dest_paddr, src_paddr;
921                 bool in_place;
922                 int this_len;
923
924                 src_paddr = (page_to_phys(walk->src.page) +
925                              walk->src.offset);
926                 dest_paddr = (page_to_phys(walk->dst.page) +
927                               walk->dst.offset);
928                 in_place = (src_paddr == dest_paddr);
929                 this_len = cipher_descriptor_len(nbytes, walk->blocksize);
930
931                 if (chunk->arr_len != 0) {
932                         if (in_place != prev_in_place ||
933                             (!prev_in_place &&
934                              dest_paddr != dest_prev) ||
935                             chunk->arr_len == N2_CHUNK_ARR_LEN ||
936                             tot_len + this_len > (1 << 16)) {
937                                 chunk->dest_final = dest_prev;
938                                 list_add_tail(&chunk->entry,
939                                               &rctx->chunk_list);
940                                 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
941                                 if (!chunk) {
942                                         err = -ENOMEM;
943                                         break;
944                                 }
945                                 INIT_LIST_HEAD(&chunk->entry);
946                         }
947                 }
948                 if (chunk->arr_len == 0) {
949                         chunk->dest_paddr = dest_paddr;
950                         tot_len = 0;
951                 }
952                 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
953                 chunk->arr[chunk->arr_len].src_len = this_len;
954                 chunk->arr_len++;
955
956                 dest_prev = dest_paddr + this_len;
957                 prev_in_place = in_place;
958                 tot_len += this_len;
959
960                 err = ablkcipher_walk_done(req, walk, nbytes - this_len);
961                 if (err)
962                         break;
963         }
964         if (!err && chunk->arr_len != 0) {
965                 chunk->dest_final = dest_prev;
966                 list_add_tail(&chunk->entry, &rctx->chunk_list);
967         }
968
969         return err;
970 }
971
972 static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
973 {
974         struct n2_request_context *rctx = ablkcipher_request_ctx(req);
975         struct n2_crypto_chunk *c, *tmp;
976
977         if (final_iv)
978                 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
979
980         ablkcipher_walk_complete(&rctx->walk);
981         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
982                 list_del(&c->entry);
983                 if (unlikely(c != &rctx->chunk))
984                         kfree(c);
985         }
986
987 }
988
989 static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
990 {
991         struct n2_request_context *rctx = ablkcipher_request_ctx(req);
992         struct crypto_tfm *tfm = req->base.tfm;
993         int err = n2_compute_chunks(req);
994         struct n2_crypto_chunk *c, *tmp;
995         unsigned long flags, hv_ret;
996         struct spu_queue *qp;
997
998         if (err)
999                 return err;
1000
1001         qp = cpu_to_cwq[get_cpu()];
1002         err = -ENODEV;
1003         if (!qp)
1004                 goto out;
1005
1006         spin_lock_irqsave(&qp->lock, flags);
1007
1008         list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
1009                 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
1010                 if (err)
1011                         break;
1012                 list_del(&c->entry);
1013                 if (unlikely(c != &rctx->chunk))
1014                         kfree(c);
1015         }
1016         if (!err) {
1017                 hv_ret = wait_for_tail(qp);
1018                 if (hv_ret != HV_EOK)
1019                         err = -EINVAL;
1020         }
1021
1022         spin_unlock_irqrestore(&qp->lock, flags);
1023
1024 out:
1025         put_cpu();
1026
1027         n2_chunk_complete(req, NULL);
1028         return err;
1029 }
1030
1031 static int n2_encrypt_ecb(struct ablkcipher_request *req)
1032 {
1033         return n2_do_ecb(req, true);
1034 }
1035
1036 static int n2_decrypt_ecb(struct ablkcipher_request *req)
1037 {
1038         return n2_do_ecb(req, false);
1039 }
1040
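/* Chained modes need per-chunk IV fix-ups: when encrypting, each chunk's IV
 * is the last ciphertext block produced by the previous chunk; when
 * decrypting (chunks are processed back to front here), the IV is the last
 * ciphertext block preceding the chunk, and the would-be final IV is copied
 * to rctx->temp_iv first, since in-place decryption overwrites it.
 */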
1041 static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
1042 {
1043         struct n2_request_context *rctx = ablkcipher_request_ctx(req);
1044         struct crypto_tfm *tfm = req->base.tfm;
1045         unsigned long flags, hv_ret, iv_paddr;
1046         int err = n2_compute_chunks(req);
1047         struct n2_crypto_chunk *c, *tmp;
1048         struct spu_queue *qp;
1049         void *final_iv_addr;
1050
1051         final_iv_addr = NULL;
1052
1053         if (err)
1054                 return err;
1055
1056         qp = cpu_to_cwq[get_cpu()];
1057         err = -ENODEV;
1058         if (!qp)
1059                 goto out;
1060
1061         spin_lock_irqsave(&qp->lock, flags);
1062
1063         if (encrypt) {
1064                 iv_paddr = __pa(rctx->walk.iv);
1065                 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1066                                          entry) {
1067                         c->iv_paddr = iv_paddr;
1068                         err = __n2_crypt_chunk(tfm, c, qp, true);
1069                         if (err)
1070                                 break;
1071                         iv_paddr = c->dest_final - rctx->walk.blocksize;
1072                         list_del(&c->entry);
1073                         if (unlikely(c != &rctx->chunk))
1074                                 kfree(c);
1075                 }
1076                 final_iv_addr = __va(iv_paddr);
1077         } else {
1078                 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1079                                                  entry) {
1080                         if (c == &rctx->chunk) {
1081                                 iv_paddr = __pa(rctx->walk.iv);
1082                         } else {
1083                                 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1084                                             tmp->arr[tmp->arr_len-1].src_len -
1085                                             rctx->walk.blocksize);
1086                         }
1087                         if (!final_iv_addr) {
1088                                 unsigned long pa;
1089
1090                                 pa = (c->arr[c->arr_len-1].src_paddr +
1091                                       c->arr[c->arr_len-1].src_len -
1092                                       rctx->walk.blocksize);
1093                                 final_iv_addr = rctx->temp_iv;
1094                                 memcpy(rctx->temp_iv, __va(pa),
1095                                        rctx->walk.blocksize);
1096                         }
1097                         c->iv_paddr = iv_paddr;
1098                         err = __n2_crypt_chunk(tfm, c, qp, false);
1099                         if (err)
1100                                 break;
1101                         list_del(&c->entry);
1102                         if (unlikely(c != &rctx->chunk))
1103                                 kfree(c);
1104                 }
1105         }
1106         if (!err) {
1107                 hv_ret = wait_for_tail(qp);
1108                 if (hv_ret != HV_EOK)
1109                         err = -EINVAL;
1110         }
1111
1112         spin_unlock_irqrestore(&qp->lock, flags);
1113
1114 out:
1115         put_cpu();
1116
1117         n2_chunk_complete(req, err ? NULL : final_iv_addr);
1118         return err;
1119 }
1120
1121 static int n2_encrypt_chaining(struct ablkcipher_request *req)
1122 {
1123         return n2_do_chaining(req, true);
1124 }
1125
1126 static int n2_decrypt_chaining(struct ablkcipher_request *req)
1127 {
1128         return n2_do_chaining(req, false);
1129 }
1130
1131 struct n2_cipher_tmpl {
1132         const char              *name;
1133         const char              *drv_name;
1134         u8                      block_size;
1135         u8                      enc_type;
1136         struct ablkcipher_alg   ablkcipher;
1137 };
1138
1139 static const struct n2_cipher_tmpl cipher_tmpls[] = {
1140         /* ARC4: only ECB is supported (chaining bits ignored) */
1141         {       .name           = "ecb(arc4)",
1142                 .drv_name       = "ecb-arc4",
1143                 .block_size     = 1,
1144                 .enc_type       = (ENC_TYPE_ALG_RC4_STREAM |
1145                                    ENC_TYPE_CHAINING_ECB),
1146                 .ablkcipher     = {
1147                         .min_keysize    = 1,
1148                         .max_keysize    = 256,
1149                         .setkey         = n2_arc4_setkey,
1150                         .encrypt        = n2_encrypt_ecb,
1151                         .decrypt        = n2_decrypt_ecb,
1152                 },
1153         },
1154
1155         /* DES: ECB, CBC and CFB are supported */
1156         {       .name           = "ecb(des)",
1157                 .drv_name       = "ecb-des",
1158                 .block_size     = DES_BLOCK_SIZE,
1159                 .enc_type       = (ENC_TYPE_ALG_DES |
1160                                    ENC_TYPE_CHAINING_ECB),
1161                 .ablkcipher     = {
1162                         .min_keysize    = DES_KEY_SIZE,
1163                         .max_keysize    = DES_KEY_SIZE,
1164                         .setkey         = n2_des_setkey,
1165                         .encrypt        = n2_encrypt_ecb,
1166                         .decrypt        = n2_decrypt_ecb,
1167                 },
1168         },
1169         {       .name           = "cbc(des)",
1170                 .drv_name       = "cbc-des",
1171                 .block_size     = DES_BLOCK_SIZE,
1172                 .enc_type       = (ENC_TYPE_ALG_DES |
1173                                    ENC_TYPE_CHAINING_CBC),
1174                 .ablkcipher     = {
1175                         .ivsize         = DES_BLOCK_SIZE,
1176                         .min_keysize    = DES_KEY_SIZE,
1177                         .max_keysize    = DES_KEY_SIZE,
1178                         .setkey         = n2_des_setkey,
1179                         .encrypt        = n2_encrypt_chaining,
1180                         .decrypt        = n2_decrypt_chaining,
1181                 },
1182         },
1183         {       .name           = "cfb(des)",
1184                 .drv_name       = "cfb-des",
1185                 .block_size     = DES_BLOCK_SIZE,
1186                 .enc_type       = (ENC_TYPE_ALG_DES |
1187                                    ENC_TYPE_CHAINING_CFB),
1188                 .ablkcipher     = {
1189                         .min_keysize    = DES_KEY_SIZE,
1190                         .max_keysize    = DES_KEY_SIZE,
1191                         .setkey         = n2_des_setkey,
1192                         .encrypt        = n2_encrypt_chaining,
1193                         .decrypt        = n2_decrypt_chaining,
1194                 },
1195         },
1196
1197         /* 3DES: ECB, CBC and CFB are supported */
1198         {       .name           = "ecb(des3_ede)",
1199                 .drv_name       = "ecb-3des",
1200                 .block_size     = DES_BLOCK_SIZE,
1201                 .enc_type       = (ENC_TYPE_ALG_3DES |
1202                                    ENC_TYPE_CHAINING_ECB),
1203                 .ablkcipher     = {
1204                         .min_keysize    = 3 * DES_KEY_SIZE,
1205                         .max_keysize    = 3 * DES_KEY_SIZE,
1206                         .setkey         = n2_3des_setkey,
1207                         .encrypt        = n2_encrypt_ecb,
1208                         .decrypt        = n2_decrypt_ecb,
1209                 },
1210         },
1211         {       .name           = "cbc(des3_ede)",
1212                 .drv_name       = "cbc-3des",
1213                 .block_size     = DES_BLOCK_SIZE,
1214                 .enc_type       = (ENC_TYPE_ALG_3DES |
1215                                    ENC_TYPE_CHAINING_CBC),
1216                 .ablkcipher     = {
1217                         .ivsize         = DES_BLOCK_SIZE,
1218                         .min_keysize    = 3 * DES_KEY_SIZE,
1219                         .max_keysize    = 3 * DES_KEY_SIZE,
1220                         .setkey         = n2_3des_setkey,
1221                         .encrypt        = n2_encrypt_chaining,
1222                         .decrypt        = n2_decrypt_chaining,
1223                 },
1224         },
1225         {       .name           = "cfb(des3_ede)",
1226                 .drv_name       = "cfb-3des",
1227                 .block_size     = DES_BLOCK_SIZE,
1228                 .enc_type       = (ENC_TYPE_ALG_3DES |
1229                                    ENC_TYPE_CHAINING_CFB),
1230                 .ablkcipher     = {
1231                         .min_keysize    = 3 * DES_KEY_SIZE,
1232                         .max_keysize    = 3 * DES_KEY_SIZE,
1233                         .setkey         = n2_3des_setkey,
1234                         .encrypt        = n2_encrypt_chaining,
1235                         .decrypt        = n2_decrypt_chaining,
1236                 },
1237         },
1238         /* AES: ECB, CBC and CTR are supported */
1239         {       .name           = "ecb(aes)",
1240                 .drv_name       = "ecb-aes",
1241                 .block_size     = AES_BLOCK_SIZE,
1242                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1243                                    ENC_TYPE_CHAINING_ECB),
1244                 .ablkcipher     = {
1245                         .min_keysize    = AES_MIN_KEY_SIZE,
1246                         .max_keysize    = AES_MAX_KEY_SIZE,
1247                         .setkey         = n2_aes_setkey,
1248                         .encrypt        = n2_encrypt_ecb,
1249                         .decrypt        = n2_decrypt_ecb,
1250                 },
1251         },
1252         {       .name           = "cbc(aes)",
1253                 .drv_name       = "cbc-aes",
1254                 .block_size     = AES_BLOCK_SIZE,
1255                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1256                                    ENC_TYPE_CHAINING_CBC),
1257                 .ablkcipher     = {
1258                         .ivsize         = AES_BLOCK_SIZE,
1259                         .min_keysize    = AES_MIN_KEY_SIZE,
1260                         .max_keysize    = AES_MAX_KEY_SIZE,
1261                         .setkey         = n2_aes_setkey,
1262                         .encrypt        = n2_encrypt_chaining,
1263                         .decrypt        = n2_decrypt_chaining,
1264                 },
1265         },
1266         {       .name           = "ctr(aes)",
1267                 .drv_name       = "ctr-aes",
1268                 .block_size     = AES_BLOCK_SIZE,
1269                 .enc_type       = (ENC_TYPE_ALG_AES128 |
1270                                    ENC_TYPE_CHAINING_COUNTER),
1271                 .ablkcipher     = {
1272                         .ivsize         = AES_BLOCK_SIZE,
1273                         .min_keysize    = AES_MIN_KEY_SIZE,
1274                         .max_keysize    = AES_MAX_KEY_SIZE,
1275                         .setkey         = n2_aes_setkey,
1276                         .encrypt        = n2_encrypt_chaining,
1277                         .decrypt        = n2_encrypt_chaining,
1278                 },
1279         },
1280
1281 };
1282 #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
1283
1284 static LIST_HEAD(cipher_algs);
1285
1286 struct n2_hash_tmpl {
1287         const char      *name;
1288         const u8        *hash_zero;
1289         const u32       *hash_init;
1290         u8              hw_op_hashsz;
1291         u8              digest_size;
1292         u8              block_size;
1293         u8              auth_type;
1294         u8              hmac_type;
1295 };
1296
1297 static const u32 md5_init[MD5_HASH_WORDS] = {
1298         cpu_to_le32(MD5_H0),
1299         cpu_to_le32(MD5_H1),
1300         cpu_to_le32(MD5_H2),
1301         cpu_to_le32(MD5_H3),
1302 };
1303 static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
1304         SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1305 };
1306 static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
1307         SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1308         SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1309 };
1310 static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
1311         SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1312         SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1313 };
1314
1315 static const struct n2_hash_tmpl hash_tmpls[] = {
1316         { .name         = "md5",
1317           .hash_zero    = md5_zero_message_hash,
1318           .hash_init    = md5_init,
1319           .auth_type    = AUTH_TYPE_MD5,
1320           .hmac_type    = AUTH_TYPE_HMAC_MD5,
1321           .hw_op_hashsz = MD5_DIGEST_SIZE,
1322           .digest_size  = MD5_DIGEST_SIZE,
1323           .block_size   = MD5_HMAC_BLOCK_SIZE },
1324         { .name         = "sha1",
1325           .hash_zero    = sha1_zero_message_hash,
1326           .hash_init    = sha1_init,
1327           .auth_type    = AUTH_TYPE_SHA1,
1328           .hmac_type    = AUTH_TYPE_HMAC_SHA1,
1329           .hw_op_hashsz = SHA1_DIGEST_SIZE,
1330           .digest_size  = SHA1_DIGEST_SIZE,
1331           .block_size   = SHA1_BLOCK_SIZE },
1332         { .name         = "sha256",
1333           .hash_zero    = sha256_zero_message_hash,
1334           .hash_init    = sha256_init,
1335           .auth_type    = AUTH_TYPE_SHA256,
1336           .hmac_type    = AUTH_TYPE_HMAC_SHA256,
1337           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1338           .digest_size  = SHA256_DIGEST_SIZE,
1339           .block_size   = SHA256_BLOCK_SIZE },
1340         { .name         = "sha224",
1341           .hash_zero    = sha224_zero_message_hash,
1342           .hash_init    = sha224_init,
1343           .auth_type    = AUTH_TYPE_SHA256,
1344           .hmac_type    = AUTH_TYPE_RESERVED,
1345           .hw_op_hashsz = SHA256_DIGEST_SIZE,
1346           .digest_size  = SHA224_DIGEST_SIZE,
1347           .block_size   = SHA224_BLOCK_SIZE },
1348 };
1349 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1350
1351 static LIST_HEAD(ahash_algs);
1352 static LIST_HEAD(hmac_algs);
1353
1354 static int algs_registered;
1355
1356 static void __n2_unregister_algs(void)
1357 {
1358         struct n2_cipher_alg *cipher, *cipher_tmp;
1359         struct n2_ahash_alg *alg, *alg_tmp;
1360         struct n2_hmac_alg *hmac, *hmac_tmp;
1361
1362         list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1363                 crypto_unregister_alg(&cipher->alg);
1364                 list_del(&cipher->entry);
1365                 kfree(cipher);
1366         }
1367         list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1368                 crypto_unregister_ahash(&hmac->derived.alg);
1369                 list_del(&hmac->derived.entry);
1370                 kfree(hmac);
1371         }
1372         list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1373                 crypto_unregister_ahash(&alg->alg);
1374                 list_del(&alg->entry);
1375                 kfree(alg);
1376         }
1377 }
1378
1379 static int n2_cipher_cra_init(struct crypto_tfm *tfm)
1380 {
1381         tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
1382         return 0;
1383 }
1384
1385 static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
1386 {
1387         struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1388         struct crypto_alg *alg;
1389         int err;
1390
1391         if (!p)
1392                 return -ENOMEM;
1393
1394         alg = &p->alg;
1395
1396         snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1397         snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1398         alg->cra_priority = N2_CRA_PRIORITY;
1399         alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1400                          CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
1401         alg->cra_blocksize = tmpl->block_size;
1402         p->enc_type = tmpl->enc_type;
1403         alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1404         alg->cra_type = &crypto_ablkcipher_type;
1405         alg->cra_u.ablkcipher = tmpl->ablkcipher;
1406         alg->cra_init = n2_cipher_cra_init;
1407         alg->cra_module = THIS_MODULE;
1408
1409         list_add(&p->entry, &cipher_algs);
1410         err = crypto_register_alg(alg);
1411         if (err) {
1412                 pr_err("%s alg registration failed\n", alg->cra_name);
1413                 list_del(&p->entry);
1414                 kfree(p);
1415         } else {
1416                 pr_info("%s alg registered\n", alg->cra_name);
1417         }
1418         return err;
1419 }
1420
1421 static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1422 {
1423         struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1424         struct ahash_alg *ahash;
1425         struct crypto_alg *base;
1426         int err;
1427
1428         if (!p)
1429                 return -ENOMEM;
1430
1431         p->child_alg = n2ahash->alg.halg.base.cra_name;
1432         memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1433         INIT_LIST_HEAD(&p->derived.entry);
1434
1435         ahash = &p->derived.alg;
1436         ahash->digest = n2_hmac_async_digest;
1437         ahash->setkey = n2_hmac_async_setkey;
1438
1439         base = &ahash->halg.base;
1440         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1441         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1442
1443         base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1444         base->cra_init = n2_hmac_cra_init;
1445         base->cra_exit = n2_hmac_cra_exit;
1446
1447         list_add(&p->derived.entry, &hmac_algs);
1448         err = crypto_register_ahash(ahash);
1449         if (err) {
1450                 pr_err("%s alg registration failed\n", base->cra_name);
1451                 list_del(&p->derived.entry);
1452                 kfree(p);
1453         } else {
1454                 pr_info("%s alg registered\n", base->cra_name);
1455         }
1456         return err;
1457 }
1458
1459 static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1460 {
1461         struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1462         struct hash_alg_common *halg;
1463         struct crypto_alg *base;
1464         struct ahash_alg *ahash;
1465         int err;
1466
1467         if (!p)
1468                 return -ENOMEM;
1469
1470         p->hash_zero = tmpl->hash_zero;
1471         p->hash_init = tmpl->hash_init;
1472         p->auth_type = tmpl->auth_type;
1473         p->hmac_type = tmpl->hmac_type;
1474         p->hw_op_hashsz = tmpl->hw_op_hashsz;
1475         p->digest_size = tmpl->digest_size;
1476
1477         ahash = &p->alg;
1478         ahash->init = n2_hash_async_init;
1479         ahash->update = n2_hash_async_update;
1480         ahash->final = n2_hash_async_final;
1481         ahash->finup = n2_hash_async_finup;
1482         ahash->digest = n2_hash_async_digest;
1483         ahash->export = n2_hash_async_noexport;
1484         ahash->import = n2_hash_async_noimport;
1485
1486         halg = &ahash->halg;
1487         halg->digestsize = tmpl->digest_size;
1488
1489         base = &halg->base;
1490         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1491         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1492         base->cra_priority = N2_CRA_PRIORITY;
1493         base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1494                           CRYPTO_ALG_NEED_FALLBACK;
1495         base->cra_blocksize = tmpl->block_size;
1496         base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1497         base->cra_module = THIS_MODULE;
1498         base->cra_init = n2_hash_cra_init;
1499         base->cra_exit = n2_hash_cra_exit;
1500
1501         list_add(&p->entry, &ahash_algs);
1502         err = crypto_register_ahash(ahash);
1503         if (err) {
1504                 pr_err("%s alg registration failed\n", base->cra_name);
1505                 list_del(&p->entry);
1506                 kfree(p);
1507         } else {
1508                 pr_info("%s alg registered\n", base->cra_name);
1509         }
1510         if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1511                 err = __n2_register_one_hmac(p);
1512         return err;
1513 }
1514
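/* Algorithm registration is reference counted under spu_lock, so the
 * algorithms are registered only once no matter how many devices
 * probe; any failure rolls back everything registered so far.
 */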
1515 static int n2_register_algs(void)
1516 {
1517         int i, err = 0;
1518
1519         mutex_lock(&spu_lock);
1520         if (algs_registered++)
1521                 goto out;
1522
1523         for (i = 0; i < NUM_HASH_TMPLS; i++) {
1524                 err = __n2_register_one_ahash(&hash_tmpls[i]);
1525                 if (err) {
1526                         __n2_unregister_algs();
1527                         goto out;
1528                 }
1529         }
1530         for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1531                 err = __n2_register_one_cipher(&cipher_tmpls[i]);
1532                 if (err) {
1533                         __n2_unregister_algs();
1534                         goto out;
1535                 }
1536         }
1537
1538 out:
1539         mutex_unlock(&spu_lock);
1540         return err;
1541 }
1542
1543 static void n2_unregister_algs(void)
1544 {
1545         mutex_lock(&spu_lock);
1546         if (!--algs_registered)
1547                 __n2_unregister_algs();
1548         mutex_unlock(&spu_lock);
1549 }
1550
1551 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1552  * a devino.  This isn't very useful to us because all of the
1553  * interrupts listed in the device_node have been translated to
1554  * Linux virtual IRQ cookie numbers.
1555  *
1556  * So we have to back-translate, going through the 'intr' and 'ino'
1557  * property tables of the n2cp MDESC node, matching them with the OF
1558  * 'interrupts' property entries, in order to figure out which
1559  * devino goes to which already-translated IRQ.
1560  */
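/* Purely illustrative example: with ino_table[] = { { .intr = 1,
 * .ino = 0x1d } } and an OF "interrupts" property of <1>, devino 0x1d
 * resolves to index 0 below.
 */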
1561 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1562                              unsigned long dev_ino)
1563 {
1564         const unsigned int *dev_intrs;
1565         unsigned int intr;
1566         int i;
1567
1568         for (i = 0; i < ip->num_intrs; i++) {
1569                 if (ip->ino_table[i].ino == dev_ino)
1570                         break;
1571         }
1572         if (i == ip->num_intrs)
1573                 return -ENODEV;
1574
1575         intr = ip->ino_table[i].intr;
1576
1577         dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1578         if (!dev_intrs)
1579                 return -ENODEV;
1580
1581         for (i = 0; i < dev->archdata.num_irqs; i++) {
1582                 if (dev_intrs[i] == intr)
1583                         return i;
1584         }
1585
1586         return -ENODEV;
1587 }
1588
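/* Ask the hypervisor for the queue's devino, back-translate that to a
 * Linux IRQ with find_devino_index(), and request the interrupt.
 */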
1589 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1590                        const char *irq_name, struct spu_queue *p,
1591                        irq_handler_t handler)
1592 {
1593         unsigned long herr;
1594         int index;
1595
1596         herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1597         if (herr)
1598                 return -EINVAL;
1599
1600         index = find_devino_index(dev, ip, p->devino);
1601         if (index < 0)
1602                 return index;
1603
1604         p->irq = dev->archdata.irqs[index];
1605
1606         snprintf(p->irq_name, sizeof(p->irq_name), "%s-%d", irq_name, index);
1607
1608         return request_irq(p->irq, handler, 0, p->irq_name, p);
1609 }
1610
1611 static struct kmem_cache *queue_cache[2];
1612
1613 static void *new_queue(unsigned long q_type)
1614 {
1615         return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1616 }
1617
1618 static void free_queue(void *p, unsigned long q_type)
1619 {
1620         kmem_cache_free(queue_cache[q_type - 1], p);
1621 }
1622
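/* Queue memory comes from two dedicated kmem caches, one per queue
 * type, with each object sized to hold a full queue and aligned to the
 * queue's entry size.
 */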
1623 static int queue_cache_init(void)
1624 {
1625         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1626                 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1627                         kmem_cache_create("mau_queue",
1628                                           (MAU_NUM_ENTRIES *
1629                                            MAU_ENTRY_SIZE),
1630                                           MAU_ENTRY_SIZE, 0, NULL);
1631         if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1632                 return -ENOMEM;
1633
1634         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1635                 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1636                         kmem_cache_create("cwq_queue",
1637                                           (CWQ_NUM_ENTRIES *
1638                                            CWQ_ENTRY_SIZE),
1639                                           CWQ_ENTRY_SIZE, 0, NULL);
1640         if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1641                 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1642                 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1643                 return -ENOMEM;
1644         }
1645         return 0;
1646 }
1647
1648 static void queue_cache_destroy(void)
1649 {
1650         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1651         kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1652         queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1653         queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1654 }
1655
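/* Configure a queue with the hypervisor.  The sun4v_ncs_qconf() call is
 * issued via work_on_cpu_safe() from a CPU in the queue's sharing mask,
 * i.e. one that is actually attached to this SPU.
 */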
1656 static long spu_queue_register_workfn(void *arg)
1657 {
1658         struct spu_qreg *qr = arg;
1659         struct spu_queue *p = qr->queue;
1660         unsigned long q_type = qr->type;
1661         unsigned long hv_ret;
1662
1663         hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1664                                  CWQ_NUM_ENTRIES, &p->qhandle);
1665         if (!hv_ret)
1666                 sun4v_ncs_sethead_marker(p->qhandle, 0);
1667
1668         return hv_ret ? -EINVAL : 0;
1669 }
1670
1671 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1672 {
1673         int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
1674         struct spu_qreg qr = { .queue = p, .type = q_type };
1675
1676         return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
1677 }
1678
1679 static int spu_queue_setup(struct spu_queue *p)
1680 {
1681         int err;
1682
1683         p->q = new_queue(p->q_type);
1684         if (!p->q)
1685                 return -ENOMEM;
1686
1687         err = spu_queue_register(p, p->q_type);
1688         if (err) {
1689                 free_queue(p->q, p->q_type);
1690                 p->q = NULL;
1691         }
1692
1693         return err;
1694 }
1695
1696 static void spu_queue_destroy(struct spu_queue *p)
1697 {
1698         unsigned long hv_ret;
1699
1700         if (!p->q)
1701                 return;
1702
1703         hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1704
1705         if (!hv_ret)
1706                 free_queue(p->q, p->q_type);
1707 }
1708
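/* Tear down every queue on the list: drop stale cpu_to_cwq references,
 * free the IRQ, unconfigure the queue with the hypervisor, and release
 * the memory.
 */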
1709 static void spu_list_destroy(struct list_head *list)
1710 {
1711         struct spu_queue *p, *n;
1712
1713         list_for_each_entry_safe(p, n, list, list) {
1714                 int i;
1715
1716                 for (i = 0; i < NR_CPUS; i++) {
1717                         if (cpu_to_cwq[i] == p)
1718                                 cpu_to_cwq[i] = NULL;
1719                 }
1720
1721                 if (p->irq) {
1722                         free_irq(p->irq, p);
1723                         p->irq = 0;
1724                 }
1725                 spu_queue_destroy(p);
1726                 list_del(&p->list);
1727                 kfree(p);
1728         }
1729 }
1730
1731 /* Walk the backward arcs of a CWQ 'exec-unit' node,
1732  * gathering cpu membership information.
1733  */
1734 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1735                                struct platform_device *dev,
1736                                u64 node, struct spu_queue *p,
1737                                struct spu_queue **table)
1738 {
1739         u64 arc;
1740
1741         mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1742                 u64 tgt = mdesc_arc_target(mdesc, arc);
1743                 const char *name = mdesc_node_name(mdesc, tgt);
1744                 const u64 *id;
1745
1746                 if (strcmp(name, "cpu"))
1747                         continue;
1748                 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1749                 if (table[*id] != NULL) {
1750                         dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
1751                                 dev->dev.of_node);
1752                         return -EINVAL;
1753                 }
1754                 cpumask_set_cpu(*id, &p->sharing);
1755                 table[*id] = p;
1756         }
1757         return 0;
1758 }
1759
1760 /* Process an 'exec-unit' MDESC node of the given type ('cwq' or 'mau').  */
1761 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1762                             struct platform_device *dev, struct mdesc_handle *mdesc,
1763                             u64 node, const char *iname, unsigned long q_type,
1764                             irq_handler_t handler, struct spu_queue **table)
1765 {
1766         struct spu_queue *p;
1767         int err;
1768
1769         p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1770         if (!p) {
1771                 dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
1772                         dev->dev.of_node);
1773                 return -ENOMEM;
1774         }
1775
1776         cpumask_clear(&p->sharing);
1777         spin_lock_init(&p->lock);
1778         p->q_type = q_type;
1779         INIT_LIST_HEAD(&p->jobs);
1780         list_add(&p->list, list);
1781
1782         err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1783         if (err)
1784                 return err;
1785
1786         err = spu_queue_setup(p);
1787         if (err)
1788                 return err;
1789
1790         return spu_map_ino(dev, ip, iname, p, handler);
1791 }
1792
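/* Walk all 'exec-unit' MDESC nodes of the requested type and set up a
 * queue (with CPU bindings and an IRQ) for each one found; on error,
 * everything built so far on 'list' is destroyed.
 */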
1793 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1794                           struct spu_mdesc_info *ip, struct list_head *list,
1795                           const char *exec_name, unsigned long q_type,
1796                           irq_handler_t handler, struct spu_queue **table)
1797 {
1798         int err = 0;
1799         u64 node;
1800
1801         mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1802                 const char *type;
1803
1804                 type = mdesc_get_property(mdesc, node, "type", NULL);
1805                 if (!type || strcmp(type, exec_name))
1806                         continue;
1807
1808                 err = handle_exec_unit(ip, list, dev, mdesc, node,
1809                                        exec_name, q_type, handler, table);
1810                 if (err) {
1811                         spu_list_destroy(list);
1812                         break;
1813                 }
1814         }
1815
1816         return err;
1817 }
1818
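/* Read the 'ino' property of the SPU's MDESC node and build the
 * ino_table consumed by find_devino_index(); the 'intr' values are
 * simply assigned 1..num_intrs in table order.
 */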
1819 static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1820                          struct spu_mdesc_info *ip)
1821 {
1822         const u64 *ino;
1823         int ino_len;
1824         int i;
1825
1826         ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1827         if (!ino) {
1828                 pr_err("NO 'ino'\n");
1829                 return -ENODEV;
1830         }
1831
1832         ip->num_intrs = ino_len / sizeof(u64);
1833         ip->ino_table = kcalloc(ip->num_intrs, sizeof(struct ino_blob),
1834                                 GFP_KERNEL);
1836         if (!ip->ino_table)
1837                 return -ENOMEM;
1838
1839         for (i = 0; i < ip->num_intrs; i++) {
1840                 struct ino_blob *b = &ip->ino_table[i];
1841                 b->intr = i + 1;
1842                 b->ino = ino[i];
1843         }
1844
1845         return 0;
1846 }
1847
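/* Find the 'virtual-device' MDESC node whose name matches and whose
 * 'cfg-handle' equals the first cell of this device's OF 'reg'
 * property, then pull its interrupt information.
 */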
1848 static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1849                                 struct platform_device *dev,
1850                                 struct spu_mdesc_info *ip,
1851                                 const char *node_name)
1852 {
1853         const unsigned int *reg;
1854         u64 node;
1855
1856         reg = of_get_property(dev->dev.of_node, "reg", NULL);
1857         if (!reg)
1858                 return -ENODEV;
1859
1860         mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1861                 const char *name;
1862                 const u64 *chdl;
1863
1864                 name = mdesc_get_property(mdesc, node, "name", NULL);
1865                 if (!name || strcmp(name, node_name))
1866                         continue;
1867                 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1868                 if (!chdl || (*chdl != *reg))
1869                         continue;
1870                 ip->cfg_handle = *chdl;
1871                 return get_irq_props(mdesc, node, ip);
1872         }
1873
1874         return -ENODEV;
1875 }
1876
1877 static unsigned long n2_spu_hvapi_major;
1878 static unsigned long n2_spu_hvapi_minor;
1879
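/* Negotiate version 2.0 of the NCS hypervisor API group.  */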
1880 static int n2_spu_hvapi_register(void)
1881 {
1882         int err;
1883
1884         n2_spu_hvapi_major = 2;
1885         n2_spu_hvapi_minor = 0;
1886
1887         err = sun4v_hvapi_register(HV_GRP_NCS,
1888                                    n2_spu_hvapi_major,
1889                                    &n2_spu_hvapi_minor);
1890
1891         if (!err)
1892                 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1893                         n2_spu_hvapi_major,
1894                         n2_spu_hvapi_minor);
1895
1896         return err;
1897 }
1898
1899 static void n2_spu_hvapi_unregister(void)
1900 {
1901         sun4v_hvapi_unregister(HV_GRP_NCS);
1902 }
1903
1904 static int global_ref;
1905
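/* State shared between the n2cp and ncp drivers (HVAPI registration,
 * the queue kmem caches, and the per-cpu queue tables) is reference
 * counted and only set up by the first user.
 */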
1906 static int grab_global_resources(void)
1907 {
1908         int err = 0;
1909
1910         mutex_lock(&spu_lock);
1911
1912         if (global_ref++)
1913                 goto out;
1914
1915         err = n2_spu_hvapi_register();
1916         if (err)
1917                 goto out;
1918
1919         err = queue_cache_init();
1920         if (err)
1921                 goto out_hvapi_release;
1922
1923         err = -ENOMEM;
1924         cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1925                              GFP_KERNEL);
1926         if (!cpu_to_cwq)
1927                 goto out_queue_cache_destroy;
1928
1929         cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1930                              GFP_KERNEL);
1931         if (!cpu_to_mau)
1932                 goto out_free_cwq_table;
1933
1934         err = 0;
1935
1936 out:
1937         if (err)
1938                 global_ref--;
1939         mutex_unlock(&spu_lock);
1940         return err;
1941
1942 out_free_cwq_table:
1943         kfree(cpu_to_cwq);
1944         cpu_to_cwq = NULL;
1945
1946 out_queue_cache_destroy:
1947         queue_cache_destroy();
1948
1949 out_hvapi_release:
1950         n2_spu_hvapi_unregister();
1951         goto out;
1952 }
1953
1954 static void release_global_resources(void)
1955 {
1956         mutex_lock(&spu_lock);
1957         if (!--global_ref) {
1958                 kfree(cpu_to_cwq);
1959                 cpu_to_cwq = NULL;
1960
1961                 kfree(cpu_to_mau);
1962                 cpu_to_mau = NULL;
1963
1964                 queue_cache_destroy();
1965                 n2_spu_hvapi_unregister();
1966         }
1967         mutex_unlock(&spu_lock);
1968 }
1969
1970 static struct n2_crypto *alloc_n2cp(void)
1971 {
1972         struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1973
1974         if (np)
1975                 INIT_LIST_HEAD(&np->cwq_list);
1976
1977         return np;
1978 }
1979
1980 static void free_n2cp(struct n2_crypto *np)
1981 {
1982         kfree(np->cwq_info.ino_table);
1983         np->cwq_info.ino_table = NULL;
1984
1985         kfree(np);
1986 }
1987
1988 static void n2_spu_driver_version(void)
1989 {
1990         static int n2_spu_version_printed;
1991
1992         if (n2_spu_version_printed++ == 0)
1993                 pr_info("%s", version);
1994 }
1995
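/* Probe an n2cp node: grab the shared resources, bind each CWQ
 * exec-unit found in the MDESC to its CPUs and interrupt, and register
 * the crypto algorithms served by those queues.
 */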
1996 static int n2_crypto_probe(struct platform_device *dev)
1997 {
1998         struct mdesc_handle *mdesc;
1999         struct n2_crypto *np;
2000         int err;
2001
2002         n2_spu_driver_version();
2003
2004         pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
2005
2006         np = alloc_n2cp();
2007         if (!np) {
2008                 dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
2009                         dev->dev.of_node);
2010                 return -ENOMEM;
2011         }
2012
2013         err = grab_global_resources();
2014         if (err) {
2015                 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2016                         dev->dev.of_node);
2017                 goto out_free_n2cp;
2018         }
2019
2020         mdesc = mdesc_grab();
2021
2022         if (!mdesc) {
2023                 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2024                         dev->dev.of_node);
2025                 err = -ENODEV;
2026                 goto out_free_global;
2027         }
2028         err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
2029         if (err) {
2030                 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2031                         dev->dev.of_node);
2032                 mdesc_release(mdesc);
2033                 goto out_free_global;
2034         }
2035
2036         err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
2037                              "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
2038                              cpu_to_cwq);
2039         mdesc_release(mdesc);
2040
2041         if (err) {
2042                 dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
2043                         dev->dev.of_node);
2044                 goto out_free_global;
2045         }
2046
2047         err = n2_register_algs();
2048         if (err) {
2049                 dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
2050                         dev->dev.of_node);
2051                 goto out_free_spu_list;
2052         }
2053
2054         dev_set_drvdata(&dev->dev, np);
2055
2056         return 0;
2057
2058 out_free_spu_list:
2059         spu_list_destroy(&np->cwq_list);
2060
2061 out_free_global:
2062         release_global_resources();
2063
2064 out_free_n2cp:
2065         free_n2cp(np);
2066
2067         return err;
2068 }
2069
2070 static int n2_crypto_remove(struct platform_device *dev)
2071 {
2072         struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2073
2074         n2_unregister_algs();
2075
2076         spu_list_destroy(&np->cwq_list);
2077
2078         release_global_resources();
2079
2080         free_n2cp(np);
2081
2082         return 0;
2083 }
2084
2085 static struct n2_mau *alloc_ncp(void)
2086 {
2087         struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2088
2089         if (mp)
2090                 INIT_LIST_HEAD(&mp->mau_list);
2091
2092         return mp;
2093 }
2094
2095 static void free_ncp(struct n2_mau *mp)
2096 {
2097         kfree(mp->mau_info.ino_table);
2098         mp->mau_info.ino_table = NULL;
2099
2100         kfree(mp);
2101 }
2102
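/* Probe an ncp node.  MAU queues are brought up the same way as the
 * CWQs, but no algorithms are registered here.
 */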
2103 static int n2_mau_probe(struct platform_device *dev)
2104 {
2105         struct mdesc_handle *mdesc;
2106         struct n2_mau *mp;
2107         int err;
2108
2109         n2_spu_driver_version();
2110
2111         pr_info("Found NCP at %pOF\n", dev->dev.of_node);
2112
2113         mp = alloc_ncp();
2114         if (!mp) {
2115                 dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
2116                         dev->dev.of_node);
2117                 return -ENOMEM;
2118         }
2119
2120         err = grab_global_resources();
2121         if (err) {
2122                 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2123                         dev->dev.of_node);
2124                 goto out_free_ncp;
2125         }
2126
2127         mdesc = mdesc_grab();
2128
2129         if (!mdesc) {
2130                 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2131                         dev->dev.of_node);
2132                 err = -ENODEV;
2133                 goto out_free_global;
2134         }
2135
2136         err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2137         if (err) {
2138                 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2139                         dev->dev.of_node);
2140                 mdesc_release(mdesc);
2141                 goto out_free_global;
2142         }
2143
2144         err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2145                              "mau", HV_NCS_QTYPE_MAU, mau_intr,
2146                              cpu_to_mau);
2147         mdesc_release(mdesc);
2148
2149         if (err) {
2150                 dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
2151                         dev->dev.of_node);
2152                 goto out_free_global;
2153         }
2154
2155         dev_set_drvdata(&dev->dev, mp);
2156
2157         return 0;
2158
2159 out_free_global:
2160         release_global_resources();
2161
2162 out_free_ncp:
2163         free_ncp(mp);
2164
2165         return err;
2166 }
2167
2168 static int n2_mau_remove(struct platform_device *dev)
2169 {
2170         struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2171
2172         spu_list_destroy(&mp->mau_list);
2173
2174         release_global_resources();
2175
2176         free_ncp(mp);
2177
2178         return 0;
2179 }
2180
2181 static const struct of_device_id n2_crypto_match[] = {
2182         {
2183                 .name = "n2cp",
2184                 .compatible = "SUNW,n2-cwq",
2185         },
2186         {
2187                 .name = "n2cp",
2188                 .compatible = "SUNW,vf-cwq",
2189         },
2190         {
2191                 .name = "n2cp",
2192                 .compatible = "SUNW,kt-cwq",
2193         },
2194         {},
2195 };
2196
2197 MODULE_DEVICE_TABLE(of, n2_crypto_match);
2198
2199 static struct platform_driver n2_crypto_driver = {
2200         .driver = {
2201                 .name           =       "n2cp",
2202                 .of_match_table =       n2_crypto_match,
2203         },
2204         .probe          =       n2_crypto_probe,
2205         .remove         =       n2_crypto_remove,
2206 };
2207
2208 static const struct of_device_id n2_mau_match[] = {
2209         {
2210                 .name = "ncp",
2211                 .compatible = "SUNW,n2-mau",
2212         },
2213         {
2214                 .name = "ncp",
2215                 .compatible = "SUNW,vf-mau",
2216         },
2217         {
2218                 .name = "ncp",
2219                 .compatible = "SUNW,kt-mau",
2220         },
2221         {},
2222 };
2223
2224 MODULE_DEVICE_TABLE(of, n2_mau_match);
2225
2226 static struct platform_driver n2_mau_driver = {
2227         .driver = {
2228                 .name           =       "ncp",
2229                 .of_match_table =       n2_mau_match,
2230         },
2231         .probe          =       n2_mau_probe,
2232         .remove         =       n2_mau_remove,
2233 };
2234
2235 static struct platform_driver * const drivers[] = {
2236         &n2_crypto_driver,
2237         &n2_mau_driver,
2238 };
2239
2240 static int __init n2_init(void)
2241 {
2242         return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2243 }
2244
2245 static void __exit n2_exit(void)
2246 {
2247         platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2248 }
2249
2250 module_init(n2_init);
2251 module_exit(n2_exit);