// SPDX-License-Identifier: GPL-2.0-only
/* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
 *
 * Copyright (C) 2010, 2011 David S. Miller <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/aes.h>
#include <crypto/internal/des.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/sched.h>

#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <asm/hypervisor.h>
#include <asm/mdesc.h>

#include "n2_core.h"

#define DRV_MODULE_NAME         "n2_crypto"
#define DRV_MODULE_VERSION      "0.2"
#define DRV_MODULE_RELDATE      "July 28, 2011"

static const char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("David S. Miller <[email protected]>");
MODULE_DESCRIPTION("Niagara2 Crypto driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define N2_CRA_PRIORITY         200

static DEFINE_MUTEX(spu_lock);

struct spu_queue {
        cpumask_t               sharing;
        unsigned long           qhandle;

        spinlock_t              lock;
        u8                      q_type;
        void                    *q;
        unsigned long           head;
        unsigned long           tail;
        struct list_head        jobs;

        unsigned long           devino;

        char                    irq_name[32];
        unsigned int            irq;

        struct list_head        list;
};

struct spu_qreg {
        struct spu_queue        *queue;
        unsigned long           type;
};

static struct spu_queue **cpu_to_cwq;
static struct spu_queue **cpu_to_mau;

static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
{
        if (q->q_type == HV_NCS_QTYPE_MAU) {
                off += MAU_ENTRY_SIZE;
                if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
                        off = 0;
        } else {
                off += CWQ_ENTRY_SIZE;
                if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
                        off = 0;
        }
        return off;
}

struct n2_request_common {
        struct list_head        entry;
        unsigned int            offset;
};
#define OFFSET_NOT_RUNNING      (~(unsigned int)0)

/* An async job request records the final tail value it used in
 * n2_request_common->offset; test to see whether that offset lies
 * in the half-open range (old_head, new_head], taking queue
 * wraparound into account.
 */
static inline bool job_finished(struct spu_queue *q, unsigned int offset,
                                unsigned long old_head, unsigned long new_head)
{
        if (old_head <= new_head) {
                if (offset > old_head && offset <= new_head)
                        return true;
        } else {
                if (offset > old_head || offset <= new_head)
                        return true;
        }
        return false;
}
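
/* A worked example (illustrative numbers only): assume a 640-byte
 * ring of 64-byte CWQ entries, so valid offsets are 0, 64, ..., 576.
 * If old_head == 576 and the queue has wrapped so that new_head ==
 * 128, then offsets 0, 64 and 128 test as finished, while 192
 * through 576 do not.  The old_head > new_head branch above handles
 * exactly this wrap case.
 */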

/* When the HEAD marker is unequal to the actual HEAD, we get
 * a virtual device INO interrupt.  We should process the
 * completed CWQ entries and adjust the HEAD marker to clear
 * the IRQ.
 */
static irqreturn_t cwq_intr(int irq, void *dev_id)
{
        unsigned long off, new_head, hv_ret;
        struct spu_queue *q = dev_id;

        pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
               smp_processor_id(), q->qhandle);

        spin_lock(&q->lock);

        hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);

        pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
               smp_processor_id(), new_head, hv_ret);

        for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
                /* XXX ... XXX */
        }

        hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
        if (hv_ret == HV_EOK)
                q->head = new_head;

        spin_unlock(&q->lock);

        return IRQ_HANDLED;
}

static irqreturn_t mau_intr(int irq, void *dev_id)
{
        struct spu_queue *q = dev_id;
        unsigned long head, hv_ret;

        spin_lock(&q->lock);

        pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
               smp_processor_id(), q->qhandle);

        hv_ret = sun4v_ncs_gethead(q->qhandle, &head);

        pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
               smp_processor_id(), head, hv_ret);

        sun4v_ncs_sethead_marker(q->qhandle, head);

        spin_unlock(&q->lock);

        return IRQ_HANDLED;
}

static void *spu_queue_next(struct spu_queue *q, void *cur)
{
        return q->q + spu_next_offset(q, cur - q->q);
}

static int spu_queue_num_free(struct spu_queue *q)
{
        unsigned long head = q->head;
        unsigned long tail = q->tail;
        unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
        unsigned long diff;

        if (head > tail)
                diff = head - tail;
        else
                diff = (end - tail) + head;

        return (diff / CWQ_ENTRY_SIZE) - 1;
}
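
/* Illustration of the arithmetic above: with head == tail the ring
 * is empty and diff == end, so num_free is CWQ_NUM_ENTRIES - 1.  The
 * -1 keeps one slot permanently unused, which is what makes a
 * completely full ring (tail one entry behind head) distinguishable
 * from an empty one.
 */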

static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
{
        int avail = spu_queue_num_free(q);

        if (avail >= num_entries)
                return q->q + q->tail;

        return NULL;
}

static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
{
        unsigned long hv_ret, new_tail;

        new_tail = spu_next_offset(q, last - q->q);

        hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
        if (hv_ret == HV_EOK)
                q->tail = new_tail;
        return hv_ret;
}

static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
                             int enc_type, int auth_type,
                             unsigned int hash_len,
                             bool sfas, bool sob, bool eob, bool encrypt,
                             int opcode)
{
        u64 word = (len - 1) & CONTROL_LEN;

        word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
        word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
        word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
        if (sfas)
                word |= CONTROL_STORE_FINAL_AUTH_STATE;
        if (sob)
                word |= CONTROL_START_OF_BLOCK;
        if (eob)
                word |= CONTROL_END_OF_BLOCK;
        if (encrypt)
                word |= CONTROL_ENCRYPT;
        if (hmac_key_len)
                word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
        if (hash_len)
                word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;

        return word;
}
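
/* Sketch of how callers assemble a control word (values here are
 * only an illustration): the first entry of a SHA1 digest request
 * passes len == nbytes, auth_type == AUTH_TYPE_SHA1, hash_len ==
 * SHA1_DIGEST_SIZE and opcode == OPCODE_INPLACE_BIT |
 * OPCODE_AUTH_MAC.  Note that len, hmac_key_len and hash_len are all
 * encoded minus one, which is how a 2^16-byte operation fits in the
 * length field.
 */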

#if 0
static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
{
        if (this_len >= 64 ||
            qp->head != qp->tail)
                return true;
        return false;
}
#endif

struct n2_ahash_alg {
        struct list_head        entry;
        const u8                *hash_zero;
        const u8                *hash_init;
        u8                      hw_op_hashsz;
        u8                      digest_size;
        u8                      auth_type;
        u8                      hmac_type;
        struct ahash_alg        alg;
};

static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct ahash_alg *ahash_alg;

        ahash_alg = container_of(alg, struct ahash_alg, halg.base);

        return container_of(ahash_alg, struct n2_ahash_alg, alg);
}

struct n2_hmac_alg {
        const char              *child_alg;
        struct n2_ahash_alg     derived;
};

static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
{
        struct crypto_alg *alg = tfm->__crt_alg;
        struct ahash_alg *ahash_alg;

        ahash_alg = container_of(alg, struct ahash_alg, halg.base);

        return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
}

struct n2_hash_ctx {
        struct crypto_ahash             *fallback_tfm;
};

#define N2_HASH_KEY_MAX                 32 /* HW limit for all HMAC requests */

struct n2_hmac_ctx {
        struct n2_hash_ctx              base;

        struct crypto_shash             *child_shash;

        int                             hash_key_len;
        unsigned char                   hash_key[N2_HASH_KEY_MAX];
};

struct n2_hash_req_ctx {
        union {
                struct md5_state        md5;
                struct sha1_state       sha1;
                struct sha256_state     sha256;
        } u;

        struct ahash_request            fallback_req;
};

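/* The hardware only performs complete digest operations, so the
 * init/update/final/finup entry points below simply delegate to the
 * software fallback tfm allocated in n2_hash_cra_init(); only
 * single-shot digests (n2_hash_async_digest) reach the SPU.
 */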
static int n2_hash_async_init(struct ahash_request *req)
{
        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;

        return crypto_ahash_init(&rctx->fallback_req);
}

static int n2_hash_async_update(struct ahash_request *req)
{
        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;

        return crypto_ahash_update(&rctx->fallback_req);
}

static int n2_hash_async_final(struct ahash_request *req)
{
        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_final(&rctx->fallback_req);
}

static int n2_hash_async_finup(struct ahash_request *req)
{
        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
        rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
        rctx->fallback_req.nbytes = req->nbytes;
        rctx->fallback_req.src = req->src;
        rctx->fallback_req.result = req->result;

        return crypto_ahash_finup(&rctx->fallback_req);
}

static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
{
        return -ENOSYS;
}

static int n2_hash_async_noexport(struct ahash_request *req, void *out)
{
        return -ENOSYS;
}

static int n2_hash_cra_init(struct crypto_tfm *tfm)
{
        const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
        struct crypto_ahash *fallback_tfm;
        int err;

        fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                pr_warn("Fallback driver '%s' could not be loaded!\n",
                        fallback_driver_name);
                err = PTR_ERR(fallback_tfm);
                goto out;
        }

        crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
                                         crypto_ahash_reqsize(fallback_tfm)));

        ctx->fallback_tfm = fallback_tfm;
        return 0;

out:
        return err;
}

static void n2_hash_cra_exit(struct crypto_tfm *tfm)
{
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
        struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);

        crypto_free_ahash(ctx->fallback_tfm);
}

static int n2_hmac_cra_init(struct crypto_tfm *tfm)
{
        const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
        struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
        struct crypto_ahash *fallback_tfm;
        struct crypto_shash *child_shash;
        int err;

        fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
                                          CRYPTO_ALG_NEED_FALLBACK);
        if (IS_ERR(fallback_tfm)) {
                pr_warn("Fallback driver '%s' could not be loaded!\n",
                        fallback_driver_name);
                err = PTR_ERR(fallback_tfm);
                goto out;
        }

        child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
        if (IS_ERR(child_shash)) {
                pr_warn("Child shash '%s' could not be loaded!\n",
                        n2alg->child_alg);
                err = PTR_ERR(child_shash);
                goto out_free_fallback;
        }

        crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
                                         crypto_ahash_reqsize(fallback_tfm)));

        ctx->child_shash = child_shash;
        ctx->base.fallback_tfm = fallback_tfm;
        return 0;

out_free_fallback:
        crypto_free_ahash(fallback_tfm);

out:
        return err;
}

static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
{
        struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);

        crypto_free_ahash(ctx->base.fallback_tfm);
        crypto_free_shash(ctx->child_shash);
}

static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
                                unsigned int keylen)
{
        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
        struct crypto_shash *child_shash = ctx->child_shash;
        struct crypto_ahash *fallback_tfm;
        int err, bs, ds;

        fallback_tfm = ctx->base.fallback_tfm;
        err = crypto_ahash_setkey(fallback_tfm, key, keylen);
        if (err)
                return err;

        bs = crypto_shash_blocksize(child_shash);
        ds = crypto_shash_digestsize(child_shash);
        BUG_ON(ds > N2_HASH_KEY_MAX);
        if (keylen > bs) {
                err = crypto_shash_tfm_digest(child_shash, key, keylen,
                                              ctx->hash_key);
                if (err)
                        return err;
                keylen = ds;
        } else if (keylen <= N2_HASH_KEY_MAX)
                memcpy(ctx->hash_key, key, keylen);

        ctx->hash_key_len = keylen;

        return err;
}
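
/* Per the usual HMAC rule (RFC 2104), a key longer than the block
 * size is first digested down to ds bytes above; e.g. a 100-byte key
 * for hmac(sha1) (64-byte blocks) is replaced by its 20-byte SHA1
 * digest before being stored in ctx->hash_key.  Numbers here are
 * illustrative.
 */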

static unsigned long wait_for_tail(struct spu_queue *qp)
{
        unsigned long head, hv_ret;

        do {
                hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
                if (hv_ret != HV_EOK) {
                        pr_err("Hypervisor error on gethead\n");
                        break;
                }
                if (head == qp->tail) {
                        qp->head = head;
                        break;
                }
        } while (1);
        return hv_ret;
}

static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
                                              struct cwq_initial_entry *ent)
{
        unsigned long hv_ret = spu_queue_submit(qp, ent);

        if (hv_ret == HV_EOK)
                hv_ret = wait_for_tail(qp);

        return hv_ret;
}

static int n2_do_async_digest(struct ahash_request *req,
                              unsigned int auth_type, unsigned int digest_size,
                              unsigned int result_size, void *hash_loc,
                              unsigned long auth_key, unsigned int auth_key_len)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cwq_initial_entry *ent;
        struct crypto_hash_walk walk;
        struct spu_queue *qp;
        unsigned long flags;
        int err = -ENODEV;
        int nbytes, cpu;

        /* The total effective length of the operation may not
         * exceed 2^16.
         */
        if (unlikely(req->nbytes > (1 << 16))) {
                struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
                struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

                ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
                rctx->fallback_req.base.flags =
                        req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
                rctx->fallback_req.nbytes = req->nbytes;
                rctx->fallback_req.src = req->src;
                rctx->fallback_req.result = req->result;

                return crypto_ahash_digest(&rctx->fallback_req);
        }

        nbytes = crypto_hash_walk_first(req, &walk);

        cpu = get_cpu();
        qp = cpu_to_cwq[cpu];
        if (!qp)
                goto out;

        spin_lock_irqsave(&qp->lock, flags);

        /* XXX can do better, improve this later by doing a by-hand scatterlist
         * XXX walk, etc.
         */
        ent = qp->q + qp->tail;

        ent->control = control_word_base(nbytes, auth_key_len, 0,
                                         auth_type, digest_size,
                                         false, true, false, false,
                                         OPCODE_INPLACE_BIT |
                                         OPCODE_AUTH_MAC);
        ent->src_addr = __pa(walk.data);
        ent->auth_key_addr = auth_key;
        ent->auth_iv_addr = __pa(hash_loc);
        ent->final_auth_state_addr = 0UL;
        ent->enc_key_addr = 0UL;
        ent->enc_iv_addr = 0UL;
        ent->dest_addr = __pa(hash_loc);

        nbytes = crypto_hash_walk_done(&walk, 0);
        while (nbytes > 0) {
                ent = spu_queue_next(qp, ent);

                ent->control = (nbytes - 1);
                ent->src_addr = __pa(walk.data);
                ent->auth_key_addr = 0UL;
                ent->auth_iv_addr = 0UL;
                ent->final_auth_state_addr = 0UL;
                ent->enc_key_addr = 0UL;
                ent->enc_iv_addr = 0UL;
                ent->dest_addr = 0UL;

                nbytes = crypto_hash_walk_done(&walk, 0);
        }
        ent->control |= CONTROL_END_OF_BLOCK;

        if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
                err = -EINVAL;
        else
                err = 0;

        spin_unlock_irqrestore(&qp->lock, flags);

        if (!err)
                memcpy(req->result, hash_loc, result_size);
out:
        put_cpu();

        return err;
}

static int n2_hash_async_digest(struct ahash_request *req)
{
        struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        int ds;

        ds = n2alg->digest_size;
        if (unlikely(req->nbytes == 0)) {
                memcpy(req->result, n2alg->hash_zero, ds);
                return 0;
        }
        memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);

        return n2_do_async_digest(req, n2alg->auth_type,
                                  n2alg->hw_op_hashsz, ds,
                                  &rctx->u, 0UL, 0);
}

static int n2_hmac_async_digest(struct ahash_request *req)
{
        struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
        struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
        int ds;

        ds = n2alg->derived.digest_size;
        if (unlikely(req->nbytes == 0) ||
            unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
                struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
                struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);

                ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
                rctx->fallback_req.base.flags =
                        req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
                rctx->fallback_req.nbytes = req->nbytes;
                rctx->fallback_req.src = req->src;
                rctx->fallback_req.result = req->result;

                return crypto_ahash_digest(&rctx->fallback_req);
        }
        memcpy(&rctx->u, n2alg->derived.hash_init,
               n2alg->derived.hw_op_hashsz);

        return n2_do_async_digest(req, n2alg->derived.hmac_type,
                                  n2alg->derived.hw_op_hashsz, ds,
                                  &rctx->u,
                                  __pa(&ctx->hash_key),
                                  ctx->hash_key_len);
}

struct n2_skcipher_context {
        int                     key_len;
        int                     enc_type;
        union {
                u8              aes[AES_MAX_KEY_SIZE];
                u8              des[DES_KEY_SIZE];
                u8              des3[3 * DES_KEY_SIZE];
        } key;
};

#define N2_CHUNK_ARR_LEN        16

struct n2_crypto_chunk {
        struct list_head        entry;
        unsigned long           iv_paddr : 44;
        unsigned long           arr_len : 20;
        unsigned long           dest_paddr;
        unsigned long           dest_final;
        struct {
                unsigned long   src_paddr : 44;
                unsigned long   src_len : 20;
        } arr[N2_CHUNK_ARR_LEN];
};
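
/* Illustration of the packing above (assuming, as the bitfields
 * imply, that physical addresses on these machines fit in 44 bits):
 * each scatterlist fragment is stored as a 44-bit physical address
 * plus a 20-bit length, so one n2_crypto_chunk describes up to
 * N2_CHUNK_ARR_LEN (16) fragments sharing one IV and one output
 * destination.
 */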

struct n2_request_context {
        struct skcipher_walk    walk;
        struct list_head        chunk_list;
        struct n2_crypto_chunk  chunk;
        u8                      temp_iv[16];
};

/* The SPU allows some level of flexibility for partial cipher blocks
 * being specified in a descriptor.
 *
 * It merely requires that every descriptor's length field is at least
 * as large as the cipher block size.  This means that a cipher block
 * can span at most 2 descriptors.  However, this does not allow a
 * partial block to span into the final descriptor as that would
 * violate the rule (since every descriptor's length must be at least
 * the block size).  So, for example, assuming an 8 byte block size:
 *
 *      0xe --> 0xa --> 0x8
 *
 * is a valid length sequence, whereas:
 *
 *      0xe --> 0xb --> 0x7
 *
 * is not a valid sequence.
 */

struct n2_skcipher_alg {
        struct list_head        entry;
        u8                      enc_type;
        struct skcipher_alg     skcipher;
};

static inline struct n2_skcipher_alg *n2_skcipher_alg(struct crypto_skcipher *tfm)
{
        struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

        return container_of(alg, struct n2_skcipher_alg, skcipher);
}

static int n2_aes_setkey(struct crypto_skcipher *skcipher, const u8 *key,
                         unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);

        ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);

        switch (keylen) {
        case AES_KEYSIZE_128:
                ctx->enc_type |= ENC_TYPE_ALG_AES128;
                break;
        case AES_KEYSIZE_192:
                ctx->enc_type |= ENC_TYPE_ALG_AES192;
                break;
        case AES_KEYSIZE_256:
                ctx->enc_type |= ENC_TYPE_ALG_AES256;
                break;
        default:
                return -EINVAL;
        }

        ctx->key_len = keylen;
        memcpy(ctx->key.aes, key, keylen);
        return 0;
}

static int n2_des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
                         unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
        int err;

        err = verify_skcipher_des_key(skcipher, key);
        if (err)
                return err;

        ctx->enc_type = n2alg->enc_type;

        ctx->key_len = keylen;
        memcpy(ctx->key.des, key, keylen);
        return 0;
}

static int n2_3des_setkey(struct crypto_skcipher *skcipher, const u8 *key,
                          unsigned int keylen)
{
        struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
        struct n2_skcipher_context *ctx = crypto_tfm_ctx(tfm);
        struct n2_skcipher_alg *n2alg = n2_skcipher_alg(skcipher);
        int err;

        err = verify_skcipher_des3_key(skcipher, key);
        if (err)
                return err;

        ctx->enc_type = n2alg->enc_type;

        ctx->key_len = keylen;
        memcpy(ctx->key.des3, key, keylen);
        return 0;
}

static inline int skcipher_descriptor_len(int nbytes, unsigned int block_size)
{
        int this_len = nbytes;

        this_len -= (nbytes & (block_size - 1));
        return this_len > (1 << 16) ? (1 << 16) : this_len;
}
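
/* Worked example (illustrative numbers): for AES (16-byte blocks)
 * and nbytes == 70010, the masking above drops the 10-byte partial
 * block to give 70000, which is then clamped to the 2^16 hardware
 * limit, so the descriptor covers 65536 bytes and the walk loop in
 * n2_compute_chunks() revisits the remainder.
 */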

static int __n2_crypt_chunk(struct crypto_skcipher *skcipher,
                            struct n2_crypto_chunk *cp,
                            struct spu_queue *qp, bool encrypt)
{
        struct n2_skcipher_context *ctx = crypto_skcipher_ctx(skcipher);
        struct cwq_initial_entry *ent;
        bool in_place;
        int i;

        ent = spu_queue_alloc(qp, cp->arr_len);
        if (!ent) {
                pr_info("queue_alloc() of %d fails\n",
                        cp->arr_len);
                return -EBUSY;
        }

        in_place = (cp->dest_paddr == cp->arr[0].src_paddr);

        ent->control = control_word_base(cp->arr[0].src_len,
                                         0, ctx->enc_type, 0, 0,
                                         false, true, false, encrypt,
                                         OPCODE_ENCRYPT |
                                         (in_place ? OPCODE_INPLACE_BIT : 0));
        ent->src_addr = cp->arr[0].src_paddr;
        ent->auth_key_addr = 0UL;
        ent->auth_iv_addr = 0UL;
        ent->final_auth_state_addr = 0UL;
        ent->enc_key_addr = __pa(&ctx->key);
        ent->enc_iv_addr = cp->iv_paddr;
        ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);

        for (i = 1; i < cp->arr_len; i++) {
                ent = spu_queue_next(qp, ent);

                ent->control = cp->arr[i].src_len - 1;
                ent->src_addr = cp->arr[i].src_paddr;
                ent->auth_key_addr = 0UL;
                ent->auth_iv_addr = 0UL;
                ent->final_auth_state_addr = 0UL;
                ent->enc_key_addr = 0UL;
                ent->enc_iv_addr = 0UL;
                ent->dest_addr = 0UL;
        }
        ent->control |= CONTROL_END_OF_BLOCK;

        return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
}

static int n2_compute_chunks(struct skcipher_request *req)
{
        struct n2_request_context *rctx = skcipher_request_ctx(req);
        struct skcipher_walk *walk = &rctx->walk;
        struct n2_crypto_chunk *chunk;
        unsigned long dest_prev;
        unsigned int tot_len;
        bool prev_in_place;
        int err, nbytes;

        err = skcipher_walk_async(walk, req);
        if (err)
                return err;

        INIT_LIST_HEAD(&rctx->chunk_list);

        chunk = &rctx->chunk;
        INIT_LIST_HEAD(&chunk->entry);

        chunk->iv_paddr = 0UL;
        chunk->arr_len = 0;
        chunk->dest_paddr = 0UL;

        prev_in_place = false;
        dest_prev = ~0UL;
        tot_len = 0;

        while ((nbytes = walk->nbytes) != 0) {
                unsigned long dest_paddr, src_paddr;
                bool in_place;
                int this_len;

                src_paddr = (page_to_phys(walk->src.phys.page) +
                             walk->src.phys.offset);
                dest_paddr = (page_to_phys(walk->dst.phys.page) +
                              walk->dst.phys.offset);
                in_place = (src_paddr == dest_paddr);
                this_len = skcipher_descriptor_len(nbytes, walk->blocksize);

                if (chunk->arr_len != 0) {
                        if (in_place != prev_in_place ||
                            (!prev_in_place &&
                             dest_paddr != dest_prev) ||
                            chunk->arr_len == N2_CHUNK_ARR_LEN ||
                            tot_len + this_len > (1 << 16)) {
                                chunk->dest_final = dest_prev;
                                list_add_tail(&chunk->entry,
                                              &rctx->chunk_list);
                                chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
                                if (!chunk) {
                                        err = -ENOMEM;
                                        break;
                                }
                                INIT_LIST_HEAD(&chunk->entry);
                        }
                }
                if (chunk->arr_len == 0) {
                        chunk->dest_paddr = dest_paddr;
                        tot_len = 0;
                }
                chunk->arr[chunk->arr_len].src_paddr = src_paddr;
                chunk->arr[chunk->arr_len].src_len = this_len;
                chunk->arr_len++;

                dest_prev = dest_paddr + this_len;
                prev_in_place = in_place;
                tot_len += this_len;

                err = skcipher_walk_done(walk, nbytes - this_len);
                if (err)
                        break;
        }
        if (!err && chunk->arr_len != 0) {
                chunk->dest_final = dest_prev;
                list_add_tail(&chunk->entry, &rctx->chunk_list);
        }

        return err;
}
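
/* Summary of the splitting rules above: a new chunk is started
 * whenever the in-place property flips, the output becomes
 * physically discontiguous, the 16-entry fragment array fills up, or
 * the chunk would exceed the 2^16 byte hardware limit.  For example
 * (hypothetical), a 256 KB request made of physically contiguous
 * 64 KB segments is split into four chunks purely by the length
 * rule.
 */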

static void n2_chunk_complete(struct skcipher_request *req, void *final_iv)
{
        struct n2_request_context *rctx = skcipher_request_ctx(req);
        struct n2_crypto_chunk *c, *tmp;

        if (final_iv)
                memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);

        list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
                list_del(&c->entry);
                if (unlikely(c != &rctx->chunk))
                        kfree(c);
        }
}

static int n2_do_ecb(struct skcipher_request *req, bool encrypt)
{
        struct n2_request_context *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        int err = n2_compute_chunks(req);
        struct n2_crypto_chunk *c, *tmp;
        unsigned long flags, hv_ret;
        struct spu_queue *qp;

        if (err)
                return err;

        qp = cpu_to_cwq[get_cpu()];
        err = -ENODEV;
        if (!qp)
                goto out;

        spin_lock_irqsave(&qp->lock, flags);

        list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
                err = __n2_crypt_chunk(tfm, c, qp, encrypt);
                if (err)
                        break;
                list_del(&c->entry);
                if (unlikely(c != &rctx->chunk))
                        kfree(c);
        }
        if (!err) {
                hv_ret = wait_for_tail(qp);
                if (hv_ret != HV_EOK)
                        err = -EINVAL;
        }

        spin_unlock_irqrestore(&qp->lock, flags);

out:
        put_cpu();

        n2_chunk_complete(req, NULL);
        return err;
}

static int n2_encrypt_ecb(struct skcipher_request *req)
{
        return n2_do_ecb(req, true);
}

static int n2_decrypt_ecb(struct skcipher_request *req)
{
        return n2_do_ecb(req, false);
}

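/* For CBC-style chaining the IV of each chunk after the first is the
 * last ciphertext block of the previous chunk.  On encryption that
 * block is the previous chunk's final output, so the chunks are
 * issued in order.  On decryption the ciphertext blocks that serve
 * as IVs are destroyed as data is decrypted in place, so the list is
 * walked in reverse and the block feeding the final IV is saved in
 * temp_iv first.
 */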
static int n2_do_chaining(struct skcipher_request *req, bool encrypt)
{
        struct n2_request_context *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        unsigned long flags, hv_ret, iv_paddr;
        int err = n2_compute_chunks(req);
        struct n2_crypto_chunk *c, *tmp;
        struct spu_queue *qp;
        void *final_iv_addr;

        final_iv_addr = NULL;

        if (err)
                return err;

        qp = cpu_to_cwq[get_cpu()];
        err = -ENODEV;
        if (!qp)
                goto out;

        spin_lock_irqsave(&qp->lock, flags);

        if (encrypt) {
                iv_paddr = __pa(rctx->walk.iv);
                list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
                                         entry) {
                        c->iv_paddr = iv_paddr;
                        err = __n2_crypt_chunk(tfm, c, qp, true);
                        if (err)
                                break;
                        iv_paddr = c->dest_final - rctx->walk.blocksize;
                        list_del(&c->entry);
                        if (unlikely(c != &rctx->chunk))
                                kfree(c);
                }
                final_iv_addr = __va(iv_paddr);
        } else {
                list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
                                                 entry) {
                        if (c == &rctx->chunk) {
                                iv_paddr = __pa(rctx->walk.iv);
                        } else {
                                iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
                                            tmp->arr[tmp->arr_len-1].src_len -
                                            rctx->walk.blocksize);
                        }
                        if (!final_iv_addr) {
                                unsigned long pa;

                                pa = (c->arr[c->arr_len-1].src_paddr +
                                      c->arr[c->arr_len-1].src_len -
                                      rctx->walk.blocksize);
                                final_iv_addr = rctx->temp_iv;
                                memcpy(rctx->temp_iv, __va(pa),
                                       rctx->walk.blocksize);
                        }
                        c->iv_paddr = iv_paddr;
                        err = __n2_crypt_chunk(tfm, c, qp, false);
                        if (err)
                                break;
                        list_del(&c->entry);
                        if (unlikely(c != &rctx->chunk))
                                kfree(c);
                }
        }
        if (!err) {
                hv_ret = wait_for_tail(qp);
                if (hv_ret != HV_EOK)
                        err = -EINVAL;
        }

        spin_unlock_irqrestore(&qp->lock, flags);

out:
        put_cpu();

        n2_chunk_complete(req, err ? NULL : final_iv_addr);
        return err;
}

static int n2_encrypt_chaining(struct skcipher_request *req)
{
        return n2_do_chaining(req, true);
}

static int n2_decrypt_chaining(struct skcipher_request *req)
{
        return n2_do_chaining(req, false);
}

struct n2_skcipher_tmpl {
        const char              *name;
        const char              *drv_name;
        u8                      block_size;
        u8                      enc_type;
        struct skcipher_alg     skcipher;
};

static const struct n2_skcipher_tmpl skcipher_tmpls[] = {
        /* DES: ECB and CBC are supported */
        {       .name           = "ecb(des)",
                .drv_name       = "ecb-des",
                .block_size     = DES_BLOCK_SIZE,
                .enc_type       = (ENC_TYPE_ALG_DES |
                                   ENC_TYPE_CHAINING_ECB),
                .skcipher       = {
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
                        .setkey         = n2_des_setkey,
                        .encrypt        = n2_encrypt_ecb,
                        .decrypt        = n2_decrypt_ecb,
                },
        },
        {       .name           = "cbc(des)",
                .drv_name       = "cbc-des",
                .block_size     = DES_BLOCK_SIZE,
                .enc_type       = (ENC_TYPE_ALG_DES |
                                   ENC_TYPE_CHAINING_CBC),
                .skcipher       = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = DES_KEY_SIZE,
                        .max_keysize    = DES_KEY_SIZE,
                        .setkey         = n2_des_setkey,
                        .encrypt        = n2_encrypt_chaining,
                        .decrypt        = n2_decrypt_chaining,
                },
        },

        /* 3DES: ECB and CBC are supported */
        {       .name           = "ecb(des3_ede)",
                .drv_name       = "ecb-3des",
                .block_size     = DES_BLOCK_SIZE,
                .enc_type       = (ENC_TYPE_ALG_3DES |
                                   ENC_TYPE_CHAINING_ECB),
                .skcipher       = {
                        .min_keysize    = 3 * DES_KEY_SIZE,
                        .max_keysize    = 3 * DES_KEY_SIZE,
                        .setkey         = n2_3des_setkey,
                        .encrypt        = n2_encrypt_ecb,
                        .decrypt        = n2_decrypt_ecb,
                },
        },
        {       .name           = "cbc(des3_ede)",
                .drv_name       = "cbc-3des",
                .block_size     = DES_BLOCK_SIZE,
                .enc_type       = (ENC_TYPE_ALG_3DES |
                                   ENC_TYPE_CHAINING_CBC),
                .skcipher       = {
                        .ivsize         = DES_BLOCK_SIZE,
                        .min_keysize    = 3 * DES_KEY_SIZE,
                        .max_keysize    = 3 * DES_KEY_SIZE,
                        .setkey         = n2_3des_setkey,
                        .encrypt        = n2_encrypt_chaining,
                        .decrypt        = n2_decrypt_chaining,
                },
        },

        /* AES: ECB CBC and CTR are supported */
        {       .name           = "ecb(aes)",
                .drv_name       = "ecb-aes",
                .block_size     = AES_BLOCK_SIZE,
                .enc_type       = (ENC_TYPE_ALG_AES128 |
                                   ENC_TYPE_CHAINING_ECB),
                .skcipher       = {
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = n2_aes_setkey,
                        .encrypt        = n2_encrypt_ecb,
                        .decrypt        = n2_decrypt_ecb,
                },
        },
        {       .name           = "cbc(aes)",
                .drv_name       = "cbc-aes",
                .block_size     = AES_BLOCK_SIZE,
                .enc_type       = (ENC_TYPE_ALG_AES128 |
                                   ENC_TYPE_CHAINING_CBC),
                .skcipher       = {
                        .ivsize         = AES_BLOCK_SIZE,
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = n2_aes_setkey,
                        .encrypt        = n2_encrypt_chaining,
                        .decrypt        = n2_decrypt_chaining,
                },
        },
        {       .name           = "ctr(aes)",
                .drv_name       = "ctr-aes",
                .block_size     = AES_BLOCK_SIZE,
                .enc_type       = (ENC_TYPE_ALG_AES128 |
                                   ENC_TYPE_CHAINING_COUNTER),
                .skcipher       = {
                        .ivsize         = AES_BLOCK_SIZE,
                        .min_keysize    = AES_MIN_KEY_SIZE,
                        .max_keysize    = AES_MAX_KEY_SIZE,
                        .setkey         = n2_aes_setkey,
                        .encrypt        = n2_encrypt_chaining,
                        .decrypt        = n2_encrypt_chaining,
                },
        },

};
#define NUM_CIPHER_TMPLS ARRAY_SIZE(skcipher_tmpls)

static LIST_HEAD(skcipher_algs);

struct n2_hash_tmpl {
        const char      *name;
        const u8        *hash_zero;
        const u8        *hash_init;
        u8              hw_op_hashsz;
        u8              digest_size;
        u8              statesize;
        u8              block_size;
        u8              auth_type;
        u8              hmac_type;
};

static const __le32 n2_md5_init[MD5_HASH_WORDS] = {
        cpu_to_le32(MD5_H0),
        cpu_to_le32(MD5_H1),
        cpu_to_le32(MD5_H2),
        cpu_to_le32(MD5_H3),
};
static const u32 n2_sha1_init[SHA1_DIGEST_SIZE / 4] = {
        SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
};
static const u32 n2_sha256_init[SHA256_DIGEST_SIZE / 4] = {
        SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
        SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
};
static const u32 n2_sha224_init[SHA256_DIGEST_SIZE / 4] = {
        SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
        SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
};

static const struct n2_hash_tmpl hash_tmpls[] = {
        { .name         = "md5",
          .hash_zero    = md5_zero_message_hash,
          .hash_init    = (u8 *)n2_md5_init,
          .auth_type    = AUTH_TYPE_MD5,
          .hmac_type    = AUTH_TYPE_HMAC_MD5,
          .hw_op_hashsz = MD5_DIGEST_SIZE,
          .digest_size  = MD5_DIGEST_SIZE,
          .statesize    = sizeof(struct md5_state),
          .block_size   = MD5_HMAC_BLOCK_SIZE },
        { .name         = "sha1",
          .hash_zero    = sha1_zero_message_hash,
          .hash_init    = (u8 *)n2_sha1_init,
          .auth_type    = AUTH_TYPE_SHA1,
          .hmac_type    = AUTH_TYPE_HMAC_SHA1,
          .hw_op_hashsz = SHA1_DIGEST_SIZE,
          .digest_size  = SHA1_DIGEST_SIZE,
          .statesize    = sizeof(struct sha1_state),
          .block_size   = SHA1_BLOCK_SIZE },
        { .name         = "sha256",
          .hash_zero    = sha256_zero_message_hash,
          .hash_init    = (u8 *)n2_sha256_init,
          .auth_type    = AUTH_TYPE_SHA256,
          .hmac_type    = AUTH_TYPE_HMAC_SHA256,
          .hw_op_hashsz = SHA256_DIGEST_SIZE,
          .digest_size  = SHA256_DIGEST_SIZE,
          .statesize    = sizeof(struct sha256_state),
          .block_size   = SHA256_BLOCK_SIZE },
        { .name         = "sha224",
          .hash_zero    = sha224_zero_message_hash,
          .hash_init    = (u8 *)n2_sha224_init,
          .auth_type    = AUTH_TYPE_SHA256,
          .hmac_type    = AUTH_TYPE_RESERVED,
          .hw_op_hashsz = SHA256_DIGEST_SIZE,
          .digest_size  = SHA224_DIGEST_SIZE,
          .statesize    = sizeof(struct sha256_state),
          .block_size   = SHA224_BLOCK_SIZE },
};
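
/* Note on the sha224 entry: the hardware runs it as a sha256
 * operation (hence hw_op_hashsz of SHA256_DIGEST_SIZE) and the
 * result is truncated to SHA224_DIGEST_SIZE; hmac_type is
 * AUTH_TYPE_RESERVED because no hmac(sha224) variant is registered
 * (see __n2_register_one_ahash below).
 */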
#define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)

static LIST_HEAD(ahash_algs);
static LIST_HEAD(hmac_algs);

static int algs_registered;

static void __n2_unregister_algs(void)
{
        struct n2_skcipher_alg *skcipher, *skcipher_tmp;
        struct n2_ahash_alg *alg, *alg_tmp;
        struct n2_hmac_alg *hmac, *hmac_tmp;

        list_for_each_entry_safe(skcipher, skcipher_tmp, &skcipher_algs, entry) {
                crypto_unregister_skcipher(&skcipher->skcipher);
                list_del(&skcipher->entry);
                kfree(skcipher);
        }
        list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
                crypto_unregister_ahash(&hmac->derived.alg);
                list_del(&hmac->derived.entry);
                kfree(hmac);
        }
        list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
                crypto_unregister_ahash(&alg->alg);
                list_del(&alg->entry);
                kfree(alg);
        }
}

static int n2_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        crypto_skcipher_set_reqsize(tfm, sizeof(struct n2_request_context));
        return 0;
}

static int __n2_register_one_skcipher(const struct n2_skcipher_tmpl *tmpl)
{
        struct n2_skcipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
        struct skcipher_alg *alg;
        int err;

        if (!p)
                return -ENOMEM;

        alg = &p->skcipher;
        *alg = tmpl->skcipher;

        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
        alg->base.cra_priority = N2_CRA_PRIORITY;
        alg->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC |
                              CRYPTO_ALG_ALLOCATES_MEMORY;
        alg->base.cra_blocksize = tmpl->block_size;
        p->enc_type = tmpl->enc_type;
        alg->base.cra_ctxsize = sizeof(struct n2_skcipher_context);
        alg->base.cra_module = THIS_MODULE;
        alg->init = n2_skcipher_init_tfm;

        list_add(&p->entry, &skcipher_algs);
        err = crypto_register_skcipher(alg);
        if (err) {
                pr_err("%s alg registration failed\n", alg->base.cra_name);
                list_del(&p->entry);
                kfree(p);
        } else {
                pr_info("%s alg registered\n", alg->base.cra_name);
        }
        return err;
}

static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
{
        struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
        struct ahash_alg *ahash;
        struct crypto_alg *base;
        int err;

        if (!p)
                return -ENOMEM;

        p->child_alg = n2ahash->alg.halg.base.cra_name;
        memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
        INIT_LIST_HEAD(&p->derived.entry);

        ahash = &p->derived.alg;
        ahash->digest = n2_hmac_async_digest;
        ahash->setkey = n2_hmac_async_setkey;

        base = &ahash->halg.base;
        err = -EINVAL;  /* returned if either algorithm name is truncated */
        if (snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
                     p->child_alg) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_p;
        if (snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2",
                     p->child_alg) >= CRYPTO_MAX_ALG_NAME)
                goto out_free_p;
1362
1363         base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1364         base->cra_init = n2_hmac_cra_init;
1365         base->cra_exit = n2_hmac_cra_exit;
1366
1367         list_add(&p->derived.entry, &hmac_algs);
1368         err = crypto_register_ahash(ahash);
1369         if (err) {
1370                 pr_err("%s alg registration failed\n", base->cra_name);
1371                 list_del(&p->derived.entry);
1372 out_free_p:
1373                 kfree(p);
1374         } else {
1375                 pr_info("%s alg registered\n", base->cra_name);
1376         }
1377         return err;
1378 }
1379
1380 static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1381 {
1382         struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1383         struct hash_alg_common *halg;
1384         struct crypto_alg *base;
1385         struct ahash_alg *ahash;
1386         int err;
1387
1388         if (!p)
1389                 return -ENOMEM;
1390
1391         p->hash_zero = tmpl->hash_zero;
1392         p->hash_init = tmpl->hash_init;
1393         p->auth_type = tmpl->auth_type;
1394         p->hmac_type = tmpl->hmac_type;
1395         p->hw_op_hashsz = tmpl->hw_op_hashsz;
1396         p->digest_size = tmpl->digest_size;
1397
1398         ahash = &p->alg;
1399         ahash->init = n2_hash_async_init;
1400         ahash->update = n2_hash_async_update;
1401         ahash->final = n2_hash_async_final;
1402         ahash->finup = n2_hash_async_finup;
1403         ahash->digest = n2_hash_async_digest;
1404         ahash->export = n2_hash_async_noexport;
1405         ahash->import = n2_hash_async_noimport;
1406
1407         halg = &ahash->halg;
1408         halg->digestsize = tmpl->digest_size;
1409         halg->statesize = tmpl->statesize;
1410
1411         base = &halg->base;
1412         snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1413         snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1414         base->cra_priority = N2_CRA_PRIORITY;
1415         base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1416                           CRYPTO_ALG_NEED_FALLBACK;
1417         base->cra_blocksize = tmpl->block_size;
1418         base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1419         base->cra_module = THIS_MODULE;
1420         base->cra_init = n2_hash_cra_init;
1421         base->cra_exit = n2_hash_cra_exit;
1422
1423         list_add(&p->entry, &ahash_algs);
1424         err = crypto_register_ahash(ahash);
1425         if (err) {
1426                 pr_err("%s alg registration failed\n", base->cra_name);
1427                 list_del(&p->entry);
1428                 kfree(p);
1429         } else {
1430                 pr_info("%s alg registered\n", base->cra_name);
1431         }
1432         if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1433                 err = __n2_register_one_hmac(p);
1434         return err;
1435 }
1436
1437 static int n2_register_algs(void)
1438 {
1439         int i, err = 0;
1440
1441         mutex_lock(&spu_lock);
1442         if (algs_registered++)
1443                 goto out;
1444
1445         for (i = 0; i < NUM_HASH_TMPLS; i++) {
1446                 err = __n2_register_one_ahash(&hash_tmpls[i]);
1447                 if (err) {
1448                         __n2_unregister_algs();
1449                         goto out;
1450                 }
1451         }
1452         for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1453                 err = __n2_register_one_skcipher(&skcipher_tmpls[i]);
1454                 if (err) {
1455                         __n2_unregister_algs();
1456                         goto out;
1457                 }
1458         }
1459
1460 out:
1461         mutex_unlock(&spu_lock);
1462         return err;
1463 }
1464
1465 static void n2_unregister_algs(void)
1466 {
1467         mutex_lock(&spu_lock);
1468         if (!--algs_registered)
1469                 __n2_unregister_algs();
1470         mutex_unlock(&spu_lock);
1471 }
1472
1473 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1474  * a devino.  This isn't very useful to us because all of the
1475  * interrupts listed in the device_node have been translated to
1476  * Linux virtual IRQ cookie numbers.
1477  *
1478  * So we have to back-translate, going through the 'intr' and 'ino'
1479  * property tables of the n2cp MDESC node, matching it with the OF
1480  * 'interrupts' property entries, in order to figure out which
1481  * devino goes to which already-translated IRQ.
1482  */
static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
                             unsigned long dev_ino)
{
        const unsigned int *dev_intrs;
        unsigned int intr;
        int i;

        for (i = 0; i < ip->num_intrs; i++) {
                if (ip->ino_table[i].ino == dev_ino)
                        break;
        }
        if (i == ip->num_intrs)
                return -ENODEV;

        intr = ip->ino_table[i].intr;

        dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
        if (!dev_intrs)
                return -ENODEV;

        for (i = 0; i < dev->archdata.num_irqs; i++) {
                if (dev_intrs[i] == intr)
                        return i;
        }

        return -ENODEV;
}

static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
                       const char *irq_name, struct spu_queue *p,
                       irq_handler_t handler)
{
        unsigned long herr;
        int index;

        herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
        if (herr)
                return -EINVAL;

        index = find_devino_index(dev, ip, p->devino);
        if (index < 0)
                return index;

        p->irq = dev->archdata.irqs[index];

        snprintf(p->irq_name, sizeof(p->irq_name), "%s-%d", irq_name, index);

        return request_irq(p->irq, handler, 0, p->irq_name, p);
}

static struct kmem_cache *queue_cache[2];

static void *new_queue(unsigned long q_type)
{
        return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
}

static void free_queue(void *p, unsigned long q_type)
{
        kmem_cache_free(queue_cache[q_type - 1], p);
}

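/* One kmem cache per queue type.  Each object is a complete queue
 * (number of entries times entry size) aligned to the entry size,
 * presumably to satisfy the hypervisor's expectations about the
 * real-address base later handed to sun4v_ncs_qconf().
 */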
static int queue_cache_init(void)
{
        if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
                queue_cache[HV_NCS_QTYPE_MAU - 1] =
                        kmem_cache_create("mau_queue",
                                          (MAU_NUM_ENTRIES *
                                           MAU_ENTRY_SIZE),
                                          MAU_ENTRY_SIZE, 0, NULL);
        if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
                return -ENOMEM;

        if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
                queue_cache[HV_NCS_QTYPE_CWQ - 1] =
                        kmem_cache_create("cwq_queue",
                                          (CWQ_NUM_ENTRIES *
                                           CWQ_ENTRY_SIZE),
                                          CWQ_ENTRY_SIZE, 0, NULL);
        if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
                kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
                queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
                return -ENOMEM;
        }
        return 0;
}

static void queue_cache_destroy(void)
{
        kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
        kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
        queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
        queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
}

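/* The qconf hypervisor call is issued via work_on_cpu_safe() on a CPU
 * that shares the unit, which suggests the queue must be configured
 * from a CPU local to the SPU in question.  Both queue types appear to
 * share one depth, so passing CWQ_NUM_ENTRIES for MAU queues as well
 * is presumably harmless here.
 */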
static long spu_queue_register_workfn(void *arg)
{
        struct spu_qreg *qr = arg;
        struct spu_queue *p = qr->queue;
        unsigned long q_type = qr->type;
        unsigned long hv_ret;

        hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
                                 CWQ_NUM_ENTRIES, &p->qhandle);
        if (!hv_ret)
                sun4v_ncs_sethead_marker(p->qhandle, 0);

        return hv_ret ? -EINVAL : 0;
}

static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
{
        int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
        struct spu_qreg qr = { .queue = p, .type = q_type };

        return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
}

static int spu_queue_setup(struct spu_queue *p)
{
        int err;

        p->q = new_queue(p->q_type);
        if (!p->q)
                return -ENOMEM;

        err = spu_queue_register(p, p->q_type);
        if (err) {
                free_queue(p->q, p->q_type);
                p->q = NULL;
        }

        return err;
}

static void spu_queue_destroy(struct spu_queue *p)
{
        unsigned long hv_ret;

        if (!p->q)
                return;

        hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);

        if (!hv_ret)
                free_queue(p->q, p->q_type);
}

static void spu_list_destroy(struct list_head *list)
{
        struct spu_queue *p, *n;

        list_for_each_entry_safe(p, n, list, list) {
                int i;

                for (i = 0; i < NR_CPUS; i++) {
                        if (cpu_to_cwq[i] == p)
                                cpu_to_cwq[i] = NULL;
                }

                if (p->irq) {
                        free_irq(p->irq, p);
                        p->irq = 0;
                }
                spu_queue_destroy(p);
                list_del(&p->list);
                kfree(p);
        }
}

/* Walk the backward arcs of a CWQ 'exec-unit' node,
 * gathering cpu membership information.
 */
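/* For illustration (a hypothetical machine description): an exec-unit
 * node with back arcs to the cpu nodes with id 4..7 ends up with cpus
 * 4-7 set in p->sharing and table[4] through table[7] all pointing at
 * the same spu_queue.
 */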
static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
                               struct platform_device *dev,
                               u64 node, struct spu_queue *p,
                               struct spu_queue **table)
{
        u64 arc;

        mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
                u64 tgt = mdesc_arc_target(mdesc, arc);
                const char *name = mdesc_node_name(mdesc, tgt);
                const u64 *id;

                if (strcmp(name, "cpu"))
                        continue;
                id = mdesc_get_property(mdesc, tgt, "id", NULL);
                /* Skip cpu nodes without a usable "id" property. */
                if (!id || *id >= NR_CPUS)
                        continue;
                if (table[*id] != NULL) {
                        dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
                                dev->dev.of_node);
                        return -EINVAL;
                }
                cpumask_set_cpu(*id, &p->sharing);
                table[*id] = p;
        }
        return 0;
}

/* Process an 'exec-unit' MDESC node of type 'cwq'.  */
static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
                            struct platform_device *dev, struct mdesc_handle *mdesc,
                            u64 node, const char *iname, unsigned long q_type,
                            irq_handler_t handler, struct spu_queue **table)
{
        struct spu_queue *p;
        int err;

        p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
        if (!p) {
                dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
                        dev->dev.of_node);
                return -ENOMEM;
        }

        cpumask_clear(&p->sharing);
        spin_lock_init(&p->lock);
        p->q_type = q_type;
        INIT_LIST_HEAD(&p->jobs);
        list_add(&p->list, list);

        err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
        if (err)
                return err;

        err = spu_queue_setup(p);
        if (err)
                return err;

        return spu_map_ino(dev, ip, iname, p, handler);
}

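/* Walk every 'exec-unit' MDESC node, pick out the ones whose 'type'
 * property matches exec_name ("cwq" or "mau") and bring up one queue
 * per matching unit.  On any failure the entire list built so far is
 * torn down before returning the error.
 */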
static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
                          struct spu_mdesc_info *ip, struct list_head *list,
                          const char *exec_name, unsigned long q_type,
                          irq_handler_t handler, struct spu_queue **table)
{
        int err = 0;
        u64 node;

        mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
                const char *type;

                type = mdesc_get_property(mdesc, node, "type", NULL);
                if (!type || strcmp(type, exec_name))
                        continue;

                err = handle_exec_unit(ip, list, dev, mdesc, node,
                                       exec_name, q_type, handler, table);
                if (err) {
                        spu_list_destroy(list);
                        break;
                }
        }

        return err;
}

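/* Build ip->ino_table from the node's 'ino' property.  The intr value
 * is synthesized as the 1-based position within the table; the devino
 * lookup above assumes the OF 'interrupts' entries use this same
 * numbering.
 */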
static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
                         struct spu_mdesc_info *ip)
{
        const u64 *ino;
        int ino_len;
        int i;

        ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
        if (!ino) {
                pr_err("MDESC node has no 'ino' property\n");
                return -ENODEV;
        }

        ip->num_intrs = ino_len / sizeof(u64);
        ip->ino_table = kzalloc((sizeof(struct ino_blob) *
                                 ip->num_intrs),
                                GFP_KERNEL);
        if (!ip->ino_table)
                return -ENOMEM;

        for (i = 0; i < ip->num_intrs; i++) {
                struct ino_blob *b = &ip->ino_table[i];
                b->intr = i + 1;
                b->ino = ino[i];
        }

        return 0;
}

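/* Locate the MDESC 'virtual-device' node backing this platform device
 * by matching the node name and by comparing its 'cfg-handle' against
 * the device's first OF 'reg' value, then extract the interrupt
 * properties from that node.
 */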
static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
                                struct platform_device *dev,
                                struct spu_mdesc_info *ip,
                                const char *node_name)
{
        u64 node, reg;

        if (of_property_read_reg(dev->dev.of_node, 0, &reg, NULL) < 0)
                return -ENODEV;

        mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
                const char *name;
                const u64 *chdl;

                name = mdesc_get_property(mdesc, node, "name", NULL);
                if (!name || strcmp(name, node_name))
                        continue;
                chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
                if (!chdl || (*chdl != reg))
                        continue;
                ip->cfg_handle = *chdl;
                return get_irq_props(mdesc, node, ip);
        }

        return -ENODEV;
}

static unsigned long n2_spu_hvapi_major;
static unsigned long n2_spu_hvapi_minor;

static int n2_spu_hvapi_register(void)
{
        int err;

        n2_spu_hvapi_major = 2;
        n2_spu_hvapi_minor = 0;

        err = sun4v_hvapi_register(HV_GRP_NCS,
                                   n2_spu_hvapi_major,
                                   &n2_spu_hvapi_minor);

        if (!err)
                pr_info("Registered NCS HVAPI version %lu.%lu\n",
                        n2_spu_hvapi_major,
                        n2_spu_hvapi_minor);

        return err;
}

static void n2_spu_hvapi_unregister(void)
{
        sun4v_hvapi_unregister(HV_GRP_NCS);
}

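/* State shared by the n2cp and ncp drivers: the NCS hypervisor API
 * registration, the queue caches and the per-cpu queue tables.  It is
 * refcounted so the first successful probe allocates everything and
 * the last remove releases it.
 */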
static int global_ref;

static int grab_global_resources(void)
{
        int err = 0;

        mutex_lock(&spu_lock);

        if (global_ref++)
                goto out;

        err = n2_spu_hvapi_register();
        if (err)
                goto out;

        err = queue_cache_init();
        if (err)
                goto out_hvapi_release;

        err = -ENOMEM;
        cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
                             GFP_KERNEL);
        if (!cpu_to_cwq)
                goto out_queue_cache_destroy;

        cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
                             GFP_KERNEL);
        if (!cpu_to_mau)
                goto out_free_cwq_table;

        err = 0;

out:
        if (err)
                global_ref--;
        mutex_unlock(&spu_lock);
        return err;

out_free_cwq_table:
        kfree(cpu_to_cwq);
        cpu_to_cwq = NULL;

out_queue_cache_destroy:
        queue_cache_destroy();

out_hvapi_release:
        n2_spu_hvapi_unregister();
        goto out;
}

static void release_global_resources(void)
{
        mutex_lock(&spu_lock);
        if (!--global_ref) {
                kfree(cpu_to_cwq);
                cpu_to_cwq = NULL;

                kfree(cpu_to_mau);
                cpu_to_mau = NULL;

                queue_cache_destroy();
                n2_spu_hvapi_unregister();
        }
        mutex_unlock(&spu_lock);
}

static struct n2_crypto *alloc_n2cp(void)
{
        struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);

        if (np)
                INIT_LIST_HEAD(&np->cwq_list);

        return np;
}

static void free_n2cp(struct n2_crypto *np)
{
        kfree(np->cwq_info.ino_table);
        np->cwq_info.ino_table = NULL;

        kfree(np);
}

static void n2_spu_driver_version(void)
{
        static int n2_spu_version_printed;

        if (n2_spu_version_printed++ == 0)
                pr_info("%s", version);
}

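/* Probe sequence for a CWQ ("n2cp") device: take a reference on the
 * global resources, grab the machine description, resolve the IRQ
 * properties, bring up one queue per 'cwq' exec-unit and finally
 * register the crypto algorithms.
 */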
static int n2_crypto_probe(struct platform_device *dev)
{
        struct mdesc_handle *mdesc;
        struct n2_crypto *np;
        int err;

        n2_spu_driver_version();

        pr_info("Found N2CP at %pOF\n", dev->dev.of_node);

        np = alloc_n2cp();
        if (!np) {
                dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
                        dev->dev.of_node);
                return -ENOMEM;
        }

        err = grab_global_resources();
        if (err) {
                dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
                        dev->dev.of_node);
                goto out_free_n2cp;
        }

        mdesc = mdesc_grab();

        if (!mdesc) {
                dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
                        dev->dev.of_node);
                err = -ENODEV;
                goto out_free_global;
        }
        err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
        if (err) {
                dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
                        dev->dev.of_node);
                mdesc_release(mdesc);
                goto out_free_global;
        }

        err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
                             "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
                             cpu_to_cwq);
        mdesc_release(mdesc);

        if (err) {
                dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
                        dev->dev.of_node);
                goto out_free_global;
        }

        err = n2_register_algs();
        if (err) {
                dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
                        dev->dev.of_node);
                goto out_free_spu_list;
        }

        dev_set_drvdata(&dev->dev, np);

        return 0;

out_free_spu_list:
        spu_list_destroy(&np->cwq_list);

out_free_global:
        release_global_resources();

out_free_n2cp:
        free_n2cp(np);

        return err;
}

static void n2_crypto_remove(struct platform_device *dev)
{
        struct n2_crypto *np = dev_get_drvdata(&dev->dev);

        n2_unregister_algs();

        spu_list_destroy(&np->cwq_list);

        release_global_resources();

        free_n2cp(np);
}

static struct n2_mau *alloc_ncp(void)
{
        struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);

        if (mp)
                INIT_LIST_HEAD(&mp->mau_list);

        return mp;
}

static void free_ncp(struct n2_mau *mp)
{
        kfree(mp->mau_info.ino_table);
        mp->mau_info.ino_table = NULL;

        kfree(mp);
}

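/* The MAU ("ncp") probe mirrors the CWQ one, except that no algorithms
 * are registered here: only the queues and interrupts of the modular
 * arithmetic units are brought up.
 */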
static int n2_mau_probe(struct platform_device *dev)
{
        struct mdesc_handle *mdesc;
        struct n2_mau *mp;
        int err;

        n2_spu_driver_version();

        pr_info("Found NCP at %pOF\n", dev->dev.of_node);

        mp = alloc_ncp();
        if (!mp) {
                dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
                        dev->dev.of_node);
                return -ENOMEM;
        }

        err = grab_global_resources();
        if (err) {
                dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
                        dev->dev.of_node);
                goto out_free_ncp;
        }

        mdesc = mdesc_grab();

        if (!mdesc) {
                dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
                        dev->dev.of_node);
                err = -ENODEV;
                goto out_free_global;
        }

        err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
        if (err) {
                dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
                        dev->dev.of_node);
                mdesc_release(mdesc);
                goto out_free_global;
        }

        err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
                             "mau", HV_NCS_QTYPE_MAU, mau_intr,
                             cpu_to_mau);
        mdesc_release(mdesc);

        if (err) {
                dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
                        dev->dev.of_node);
                goto out_free_global;
        }

        dev_set_drvdata(&dev->dev, mp);

        return 0;

out_free_global:
        release_global_resources();

out_free_ncp:
        free_ncp(mp);

        return err;
}

static void n2_mau_remove(struct platform_device *dev)
{
        struct n2_mau *mp = dev_get_drvdata(&dev->dev);

        spu_list_destroy(&mp->mau_list);

        release_global_resources();

        free_ncp(mp);
}

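/* The match tables cover three generations of the hardware: "n2"
 * (UltraSPARC T2), "vf" (Victoria Falls, UltraSPARC T2+) and "kt"
 * (Rainbow Falls, SPARC T3).
 */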
static const struct of_device_id n2_crypto_match[] = {
        {
                .name = "n2cp",
                .compatible = "SUNW,n2-cwq",
        },
        {
                .name = "n2cp",
                .compatible = "SUNW,vf-cwq",
        },
        {
                .name = "n2cp",
                .compatible = "SUNW,kt-cwq",
        },
        {},
};

MODULE_DEVICE_TABLE(of, n2_crypto_match);

static struct platform_driver n2_crypto_driver = {
        .driver = {
                .name           =       "n2cp",
                .of_match_table =       n2_crypto_match,
        },
        .probe          =       n2_crypto_probe,
        .remove_new     =       n2_crypto_remove,
};

static const struct of_device_id n2_mau_match[] = {
        {
                .name = "ncp",
                .compatible = "SUNW,n2-mau",
        },
        {
                .name = "ncp",
                .compatible = "SUNW,vf-mau",
        },
        {
                .name = "ncp",
                .compatible = "SUNW,kt-mau",
        },
        {},
};

MODULE_DEVICE_TABLE(of, n2_mau_match);

static struct platform_driver n2_mau_driver = {
        .driver = {
                .name           =       "ncp",
                .of_match_table =       n2_mau_match,
        },
        .probe          =       n2_mau_probe,
        .remove_new     =       n2_mau_remove,
};

static struct platform_driver * const drivers[] = {
        &n2_crypto_driver,
        &n2_mau_driver,
};

static int __init n2_init(void)
{
        return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}

static void __exit n2_exit(void)
{
        platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}

module_init(n2_init);
module_exit(n2_exit);