1 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/kernel.h>
9 #include <linux/module.h>
11 #include <linux/of_device.h>
12 #include <linux/cpumask.h>
13 #include <linux/slab.h>
14 #include <linux/interrupt.h>
15 #include <linux/crypto.h>
16 #include <crypto/md5.h>
17 #include <crypto/sha.h>
18 #include <crypto/aes.h>
19 #include <crypto/des.h>
20 #include <linux/mutex.h>
21 #include <linux/delay.h>
22 #include <linux/sched.h>
24 #include <crypto/internal/hash.h>
25 #include <crypto/scatterwalk.h>
26 #include <crypto/algapi.h>
28 #include <asm/hypervisor.h>
29 #include <asm/mdesc.h>
33 #define DRV_MODULE_NAME "n2_crypto"
34 #define DRV_MODULE_VERSION "0.2"
35 #define DRV_MODULE_RELDATE "July 28, 2011"
37 static const char version[] =
38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
41 MODULE_DESCRIPTION("Niagara2 Crypto driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(DRV_MODULE_VERSION);
45 #define N2_CRA_PRIORITY 200
47 static DEFINE_MUTEX(spu_lock);
51 unsigned long qhandle;
58 struct list_head jobs;
65 struct list_head list;
69 struct spu_queue *queue;
73 static struct spu_queue **cpu_to_cwq;
74 static struct spu_queue **cpu_to_mau;
76 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
78 if (q->q_type == HV_NCS_QTYPE_MAU) {
79 off += MAU_ENTRY_SIZE;
80 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
83 off += CWQ_ENTRY_SIZE;
84 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
90 struct n2_request_common {
91 struct list_head entry;
94 #define OFFSET_NOT_RUNNING (~(unsigned int)0)
96 /* An async job request records the final tail value it used in
97 * n2_request_common->offset; test whether that offset lies in the
98 * range (old_head, new_head], allowing for queue wrap-around.
100 static inline bool job_finished(struct spu_queue *q, unsigned int offset,
101 unsigned long old_head, unsigned long new_head)
103 if (old_head <= new_head) {
104 if (offset > old_head && offset <= new_head)
107 if (offset > old_head || offset <= new_head)
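/* Worked example for job_finished() with hypothetical offsets: if the queue
 * has wrapped so that old_head == 0x1c0 and new_head == 0x80, then a job that
 * recorded offset 0x200 (> old_head) or offset 0x40 (<= new_head) has
 * completed, while a job whose recorded offset is 0x100 is still outstanding.
 */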
113 /* When the HEAD marker is unequal to the actual HEAD, we get
114 * a virtual device INO interrupt. We should process the
115 * completed CWQ entries and adjust the HEAD marker to clear the interrupt.
118 static irqreturn_t cwq_intr(int irq, void *dev_id)
120 unsigned long off, new_head, hv_ret;
121 struct spu_queue *q = dev_id;
123 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
124 smp_processor_id(), q->qhandle);
128 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
130 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
131 smp_processor_id(), new_head, hv_ret);
133 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
137 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
138 if (hv_ret == HV_EOK)
141 spin_unlock(&q->lock);
146 static irqreturn_t mau_intr(int irq, void *dev_id)
148 struct spu_queue *q = dev_id;
149 unsigned long head, hv_ret;
153 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
154 smp_processor_id(), q->qhandle);
156 hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
158 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
159 smp_processor_id(), head, hv_ret);
161 sun4v_ncs_sethead_marker(q->qhandle, head);
163 spin_unlock(&q->lock);
168 static void *spu_queue_next(struct spu_queue *q, void *cur)
170 return q->q + spu_next_offset(q, cur - q->q);
173 static int spu_queue_num_free(struct spu_queue *q)
175 unsigned long head = q->head;
176 unsigned long tail = q->tail;
177 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
183 diff = (end - tail) + head;
185 return (diff / CWQ_ENTRY_SIZE) - 1;
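/* One slot is always kept unused so that a completely full ring (where head
 * would again equal tail) cannot be confused with an empty one, hence the
 * "- 1" above.  E.g., with hypothetical values end == 0x800, tail == 0x780
 * and head == 0x100, the wrapped case gives
 * diff = (0x800 - 0x780) + 0x100 = 0x180, i.e. 0x180 / CWQ_ENTRY_SIZE - 1
 * free entries.
 */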
188 static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
190 int avail = spu_queue_num_free(q);
192 if (avail >= num_entries)
193 return q->q + q->tail;
198 static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
200 unsigned long hv_ret, new_tail;
202 new_tail = spu_next_offset(q, last - q->q);
204 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
205 if (hv_ret == HV_EOK)
210 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
211 int enc_type, int auth_type,
212 unsigned int hash_len,
213 bool sfas, bool sob, bool eob, bool encrypt,
216 u64 word = (len - 1) & CONTROL_LEN;
218 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
219 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
220 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
222 word |= CONTROL_STORE_FINAL_AUTH_STATE;
224 word |= CONTROL_START_OF_BLOCK;
226 word |= CONTROL_END_OF_BLOCK;
228 word |= CONTROL_ENCRYPT;
230 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
232 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
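/* Note that the length fields are stored biased by one, so a full 2^16-byte
 * operation still fits the CONTROL_LEN field and a hash_len of e.g. 20 is
 * encoded as 19.  The sfas/sob/eob/encrypt booleans map directly onto the
 * CONTROL_STORE_FINAL_AUTH_STATE, CONTROL_START_OF_BLOCK,
 * CONTROL_END_OF_BLOCK and CONTROL_ENCRYPT flag bits above.
 */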
238 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
240 if (this_len >= 64 ||
241 qp->head != qp->tail)
247 struct n2_ahash_alg {
248 struct list_head entry;
250 const u32 *hash_init;
255 struct ahash_alg alg;
258 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
260 struct crypto_alg *alg = tfm->__crt_alg;
261 struct ahash_alg *ahash_alg;
263 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
265 return container_of(ahash_alg, struct n2_ahash_alg, alg);
269 const char *child_alg;
270 struct n2_ahash_alg derived;
273 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
275 struct crypto_alg *alg = tfm->__crt_alg;
276 struct ahash_alg *ahash_alg;
278 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
280 return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
284 struct crypto_ahash *fallback_tfm;
287 #define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
290 struct n2_hash_ctx base;
292 struct crypto_shash *child_shash;
295 unsigned char hash_key[N2_HASH_KEY_MAX];
298 struct n2_hash_req_ctx {
300 struct md5_state md5;
301 struct sha1_state sha1;
302 struct sha256_state sha256;
305 struct ahash_request fallback_req;
308 static int n2_hash_async_init(struct ahash_request *req)
310 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
311 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
312 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
314 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
315 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
317 return crypto_ahash_init(&rctx->fallback_req);
320 static int n2_hash_async_update(struct ahash_request *req)
322 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
323 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
324 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
326 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
327 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
328 rctx->fallback_req.nbytes = req->nbytes;
329 rctx->fallback_req.src = req->src;
331 return crypto_ahash_update(&rctx->fallback_req);
334 static int n2_hash_async_final(struct ahash_request *req)
336 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
337 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
338 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
340 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
341 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
342 rctx->fallback_req.result = req->result;
344 return crypto_ahash_final(&rctx->fallback_req);
347 static int n2_hash_async_finup(struct ahash_request *req)
349 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
350 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
351 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
353 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
354 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
355 rctx->fallback_req.nbytes = req->nbytes;
356 rctx->fallback_req.src = req->src;
357 rctx->fallback_req.result = req->result;
359 return crypto_ahash_finup(&rctx->fallback_req);
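/* All of the incremental ahash entry points above simply hand the request to
 * the software fallback transform; only the one-shot ->digest() path
 * (n2_hash_async_digest() below) is offloaded to the SPU, and even that falls
 * back when the request exceeds the hardware's 2^16-byte limit.
 */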
362 static int n2_hash_async_noimport(struct ahash_request *req, const void *in)
367 static int n2_hash_async_noexport(struct ahash_request *req, void *out)
372 static int n2_hash_cra_init(struct crypto_tfm *tfm)
374 const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
375 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
376 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
377 struct crypto_ahash *fallback_tfm;
380 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
381 CRYPTO_ALG_NEED_FALLBACK);
382 if (IS_ERR(fallback_tfm)) {
383 pr_warn("Fallback driver '%s' could not be loaded!\n",
384 fallback_driver_name);
385 err = PTR_ERR(fallback_tfm);
389 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
390 crypto_ahash_reqsize(fallback_tfm)));
392 ctx->fallback_tfm = fallback_tfm;
399 static void n2_hash_cra_exit(struct crypto_tfm *tfm)
401 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
402 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
404 crypto_free_ahash(ctx->fallback_tfm);
407 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
409 const char *fallback_driver_name = crypto_tfm_alg_name(tfm);
410 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
411 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
412 struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
413 struct crypto_ahash *fallback_tfm;
414 struct crypto_shash *child_shash;
417 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
418 CRYPTO_ALG_NEED_FALLBACK);
419 if (IS_ERR(fallback_tfm)) {
420 pr_warn("Fallback driver '%s' could not be loaded!\n",
421 fallback_driver_name);
422 err = PTR_ERR(fallback_tfm);
426 child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
427 if (IS_ERR(child_shash)) {
428 pr_warn("Child shash '%s' could not be loaded!\n",
430 err = PTR_ERR(child_shash);
431 goto out_free_fallback;
434 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
435 crypto_ahash_reqsize(fallback_tfm)));
437 ctx->child_shash = child_shash;
438 ctx->base.fallback_tfm = fallback_tfm;
442 crypto_free_ahash(fallback_tfm);
448 static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
450 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
451 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
453 crypto_free_ahash(ctx->base.fallback_tfm);
454 crypto_free_shash(ctx->child_shash);
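/* HMAC keys longer than the child hash's block size are pre-digested with the
 * child shash (the usual HMAC key-shortening step), so the key the SPU
 * actually sees never exceeds the digest size; shorter keys are copied
 * verbatim, and anything still over N2_HASH_KEY_MAX forces the fallback path
 * at digest time.
 */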
457 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
460 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
461 struct crypto_shash *child_shash = ctx->child_shash;
462 struct crypto_ahash *fallback_tfm;
463 SHASH_DESC_ON_STACK(shash, child_shash);
466 fallback_tfm = ctx->base.fallback_tfm;
467 err = crypto_ahash_setkey(fallback_tfm, key, keylen);
471 shash->tfm = child_shash;
473 bs = crypto_shash_blocksize(child_shash);
474 ds = crypto_shash_digestsize(child_shash);
475 BUG_ON(ds > N2_HASH_KEY_MAX);
477 err = crypto_shash_digest(shash, key, keylen,
482 } else if (keylen <= N2_HASH_KEY_MAX)
483 memcpy(ctx->hash_key, key, keylen);
485 ctx->hash_key_len = keylen;
490 static unsigned long wait_for_tail(struct spu_queue *qp)
492 unsigned long head, hv_ret;
495 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
496 if (hv_ret != HV_EOK) {
497 pr_err("Hypervisor error on gethead\n");
500 if (head == qp->tail) {
508 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
509 struct cwq_initial_entry *ent)
511 unsigned long hv_ret = spu_queue_submit(qp, ent);
513 if (hv_ret == HV_EOK)
514 hv_ret = wait_for_tail(qp);
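/* n2_do_async_digest() builds the hardware job as a chain of CWQ descriptors:
 * the first entry carries the full control word (auth type, digest size,
 * start-of-block) plus the IV and result addresses, while each subsequent
 * entry only supplies the length and source address of the next hash-walk
 * segment; CONTROL_END_OF_BLOCK is set on the final entry before submission.
 * hw_op_hashsz is the size of the state the hardware operates on (e.g. a full
 * SHA-256 state for SHA-224), of which only result_size bytes are copied back
 * to the caller.
 */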
519 static int n2_do_async_digest(struct ahash_request *req,
520 unsigned int auth_type, unsigned int digest_size,
521 unsigned int result_size, void *hash_loc,
522 unsigned long auth_key, unsigned int auth_key_len)
524 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
525 struct cwq_initial_entry *ent;
526 struct crypto_hash_walk walk;
527 struct spu_queue *qp;
532 /* The total effective length of the operation may not exceed 2^16 bytes; fall back to the software implementation for anything larger.
535 if (unlikely(req->nbytes > (1 << 16))) {
536 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
537 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
539 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
540 rctx->fallback_req.base.flags =
541 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
542 rctx->fallback_req.nbytes = req->nbytes;
543 rctx->fallback_req.src = req->src;
544 rctx->fallback_req.result = req->result;
546 return crypto_ahash_digest(&rctx->fallback_req);
549 nbytes = crypto_hash_walk_first(req, &walk);
552 qp = cpu_to_cwq[cpu];
556 spin_lock_irqsave(&qp->lock, flags);
558 /* XXX can do better, improve this later by doing a by-hand scatterlist
561 ent = qp->q + qp->tail;
563 ent->control = control_word_base(nbytes, auth_key_len, 0,
564 auth_type, digest_size,
565 false, true, false, false,
568 ent->src_addr = __pa(walk.data);
569 ent->auth_key_addr = auth_key;
570 ent->auth_iv_addr = __pa(hash_loc);
571 ent->final_auth_state_addr = 0UL;
572 ent->enc_key_addr = 0UL;
573 ent->enc_iv_addr = 0UL;
574 ent->dest_addr = __pa(hash_loc);
576 nbytes = crypto_hash_walk_done(&walk, 0);
578 ent = spu_queue_next(qp, ent);
580 ent->control = (nbytes - 1);
581 ent->src_addr = __pa(walk.data);
582 ent->auth_key_addr = 0UL;
583 ent->auth_iv_addr = 0UL;
584 ent->final_auth_state_addr = 0UL;
585 ent->enc_key_addr = 0UL;
586 ent->enc_iv_addr = 0UL;
587 ent->dest_addr = 0UL;
589 nbytes = crypto_hash_walk_done(&walk, 0);
591 ent->control |= CONTROL_END_OF_BLOCK;
593 if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
598 spin_unlock_irqrestore(&qp->lock, flags);
601 memcpy(req->result, hash_loc, result_size);
608 static int n2_hash_async_digest(struct ahash_request *req)
610 struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
611 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
614 ds = n2alg->digest_size;
615 if (unlikely(req->nbytes == 0)) {
616 memcpy(req->result, n2alg->hash_zero, ds);
619 memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
621 return n2_do_async_digest(req, n2alg->auth_type,
622 n2alg->hw_op_hashsz, ds,
626 static int n2_hmac_async_digest(struct ahash_request *req)
628 struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
629 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
630 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
631 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
634 ds = n2alg->derived.digest_size;
635 if (unlikely(req->nbytes == 0) ||
636 unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
637 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
638 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
640 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
641 rctx->fallback_req.base.flags =
642 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
643 rctx->fallback_req.nbytes = req->nbytes;
644 rctx->fallback_req.src = req->src;
645 rctx->fallback_req.result = req->result;
647 return crypto_ahash_digest(&rctx->fallback_req);
649 memcpy(&rctx->u, n2alg->derived.hash_init,
650 n2alg->derived.hw_op_hashsz);
652 return n2_do_async_digest(req, n2alg->derived.hmac_type,
653 n2alg->derived.hw_op_hashsz, ds,
655 __pa(&ctx->hash_key),
659 struct n2_cipher_context {
663 u8 aes[AES_MAX_KEY_SIZE];
664 u8 des[DES_KEY_SIZE];
665 u8 des3[3 * DES_KEY_SIZE];
666 u8 arc4[258]; /* S-box, X, Y */
670 #define N2_CHUNK_ARR_LEN 16
672 struct n2_crypto_chunk {
673 struct list_head entry;
674 unsigned long iv_paddr : 44;
675 unsigned long arr_len : 20;
676 unsigned long dest_paddr;
677 unsigned long dest_final;
679 unsigned long src_paddr : 44;
680 unsigned long src_len : 20;
681 } arr[N2_CHUNK_ARR_LEN];
684 struct n2_request_context {
685 struct ablkcipher_walk walk;
686 struct list_head chunk_list;
687 struct n2_crypto_chunk chunk;
691 /* The SPU allows some level of flexibility for partial cipher blocks
692 * being specified in a descriptor.
694 * It merely requires that every descriptor's length field is at least
695 * as large as the cipher block size. This means that a cipher block
696 * can span at most 2 descriptors. However, this does not allow a
697 * partial block to span into the final descriptor as that would
698 * violate the rule (since every descriptor's length must be at least
699 * the block size). So, for example, assuming an 8-byte block size:
701 * 0xe --> 0xa --> 0x8
703 * is a valid length sequence, whereas:
705 * 0xe --> 0xb --> 0x7
707 * is not a valid sequence.
710 struct n2_cipher_alg {
711 struct list_head entry;
713 struct crypto_alg alg;
716 static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
718 struct crypto_alg *alg = tfm->__crt_alg;
720 return container_of(alg, struct n2_cipher_alg, alg);
723 struct n2_cipher_request_context {
724 struct ablkcipher_walk walk;
727 static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
730 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
731 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
732 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
734 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
737 case AES_KEYSIZE_128:
738 ctx->enc_type |= ENC_TYPE_ALG_AES128;
740 case AES_KEYSIZE_192:
741 ctx->enc_type |= ENC_TYPE_ALG_AES192;
743 case AES_KEYSIZE_256:
744 ctx->enc_type |= ENC_TYPE_ALG_AES256;
747 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
751 ctx->key_len = keylen;
752 memcpy(ctx->key.aes, key, keylen);
756 static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
759 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
760 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
761 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
762 u32 tmp[DES_EXPKEY_WORDS];
765 ctx->enc_type = n2alg->enc_type;
767 if (keylen != DES_KEY_SIZE) {
768 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
772 err = des_ekey(tmp, key);
773 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) {
774 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
778 ctx->key_len = keylen;
779 memcpy(ctx->key.des, key, keylen);
783 static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
786 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
787 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
788 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
792 flags = crypto_ablkcipher_get_flags(cipher);
793 err = __des3_verify_key(&flags, key);
795 crypto_ablkcipher_set_flags(cipher, flags);
799 ctx->enc_type = n2alg->enc_type;
801 ctx->key_len = keylen;
802 memcpy(ctx->key.des3, key, keylen);
806 static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
809 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
810 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
811 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
812 u8 *s = ctx->key.arc4;
817 ctx->enc_type = n2alg->enc_type;
822 for (i = 0; i < 256; i++)
824 for (i = 0; i < 256; i++) {
826 j = (j + key[k] + a) & 0xff;
836 static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
838 int this_len = nbytes;
840 this_len -= (nbytes & (block_size - 1));
841 return this_len > (1 << 16) ? (1 << 16) : this_len;
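/* E.g. (hypothetical numbers) with a 16-byte block size and nbytes == 0x1001d,
 * the 0xd-byte partial tail is dropped, giving 0x10010, which is then clamped
 * to the 2^16-byte per-descriptor-chain limit.
 */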
844 static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
845 struct spu_queue *qp, bool encrypt)
847 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
848 struct cwq_initial_entry *ent;
852 ent = spu_queue_alloc(qp, cp->arr_len);
854 pr_info("queue_alloc() of %d fails\n",
859 in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
861 ent->control = control_word_base(cp->arr[0].src_len,
862 0, ctx->enc_type, 0, 0,
863 false, true, false, encrypt,
865 (in_place ? OPCODE_INPLACE_BIT : 0));
866 ent->src_addr = cp->arr[0].src_paddr;
867 ent->auth_key_addr = 0UL;
868 ent->auth_iv_addr = 0UL;
869 ent->final_auth_state_addr = 0UL;
870 ent->enc_key_addr = __pa(&ctx->key);
871 ent->enc_iv_addr = cp->iv_paddr;
872 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
874 for (i = 1; i < cp->arr_len; i++) {
875 ent = spu_queue_next(qp, ent);
877 ent->control = cp->arr[i].src_len - 1;
878 ent->src_addr = cp->arr[i].src_paddr;
879 ent->auth_key_addr = 0UL;
880 ent->auth_iv_addr = 0UL;
881 ent->final_auth_state_addr = 0UL;
882 ent->enc_key_addr = 0UL;
883 ent->enc_iv_addr = 0UL;
884 ent->dest_addr = 0UL;
886 ent->control |= CONTROL_END_OF_BLOCK;
888 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
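/* n2_compute_chunks() walks the request's scatterlists and packs physically
 * contiguous runs into n2_crypto_chunk descriptors.  A new chunk is started
 * whenever the in-place property changes, the destination stops being
 * contiguous with the previous segment (for the not-in-place case), the
 * per-chunk array fills up (N2_CHUNK_ARR_LEN entries), or the chunk's total
 * length would exceed 2^16 bytes.
 */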
891 static int n2_compute_chunks(struct ablkcipher_request *req)
893 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
894 struct ablkcipher_walk *walk = &rctx->walk;
895 struct n2_crypto_chunk *chunk;
896 unsigned long dest_prev;
897 unsigned int tot_len;
901 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
902 err = ablkcipher_walk_phys(req, walk);
906 INIT_LIST_HEAD(&rctx->chunk_list);
908 chunk = &rctx->chunk;
909 INIT_LIST_HEAD(&chunk->entry);
911 chunk->iv_paddr = 0UL;
913 chunk->dest_paddr = 0UL;
915 prev_in_place = false;
919 while ((nbytes = walk->nbytes) != 0) {
920 unsigned long dest_paddr, src_paddr;
924 src_paddr = (page_to_phys(walk->src.page) +
926 dest_paddr = (page_to_phys(walk->dst.page) +
928 in_place = (src_paddr == dest_paddr);
929 this_len = cipher_descriptor_len(nbytes, walk->blocksize);
931 if (chunk->arr_len != 0) {
932 if (in_place != prev_in_place ||
934 dest_paddr != dest_prev) ||
935 chunk->arr_len == N2_CHUNK_ARR_LEN ||
936 tot_len + this_len > (1 << 16)) {
937 chunk->dest_final = dest_prev;
938 list_add_tail(&chunk->entry,
940 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
945 INIT_LIST_HEAD(&chunk->entry);
948 if (chunk->arr_len == 0) {
949 chunk->dest_paddr = dest_paddr;
952 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
953 chunk->arr[chunk->arr_len].src_len = this_len;
956 dest_prev = dest_paddr + this_len;
957 prev_in_place = in_place;
960 err = ablkcipher_walk_done(req, walk, nbytes - this_len);
964 if (!err && chunk->arr_len != 0) {
965 chunk->dest_final = dest_prev;
966 list_add_tail(&chunk->entry, &rctx->chunk_list);
972 static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
974 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
975 struct n2_crypto_chunk *c, *tmp;
978 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
980 ablkcipher_walk_complete(&rctx->walk);
981 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
983 if (unlikely(c != &rctx->chunk))
989 static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
991 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
992 struct crypto_tfm *tfm = req->base.tfm;
993 int err = n2_compute_chunks(req);
994 struct n2_crypto_chunk *c, *tmp;
995 unsigned long flags, hv_ret;
996 struct spu_queue *qp;
1001 qp = cpu_to_cwq[get_cpu()];
1006 spin_lock_irqsave(&qp->lock, flags);
1008 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
1009 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
1012 list_del(&c->entry);
1013 if (unlikely(c != &rctx->chunk))
1017 hv_ret = wait_for_tail(qp);
1018 if (hv_ret != HV_EOK)
1022 spin_unlock_irqrestore(&qp->lock, flags);
1027 n2_chunk_complete(req, NULL);
1031 static int n2_encrypt_ecb(struct ablkcipher_request *req)
1033 return n2_do_ecb(req, true);
1036 static int n2_decrypt_ecb(struct ablkcipher_request *req)
1038 return n2_do_ecb(req, false);
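/* For the chaining modes, encryption processes the chunk list in order and
 * the IV for each subsequent chunk is the last ciphertext block the previous
 * chunk produced (dest_final - blocksize).  Decryption walks the list in
 * reverse so that each chunk's IV, i.e. the last ciphertext block of the
 * chunk preceding it, is read from the still-unmodified source data; the
 * final ciphertext block is copied into rctx->temp_iv before its chunk is
 * decrypted, so the caller still gets the correct output IV even for
 * in-place operation.
 */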
1041 static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
1043 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
1044 struct crypto_tfm *tfm = req->base.tfm;
1045 unsigned long flags, hv_ret, iv_paddr;
1046 int err = n2_compute_chunks(req);
1047 struct n2_crypto_chunk *c, *tmp;
1048 struct spu_queue *qp;
1049 void *final_iv_addr;
1051 final_iv_addr = NULL;
1056 qp = cpu_to_cwq[get_cpu()];
1061 spin_lock_irqsave(&qp->lock, flags);
1064 iv_paddr = __pa(rctx->walk.iv);
1065 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1067 c->iv_paddr = iv_paddr;
1068 err = __n2_crypt_chunk(tfm, c, qp, true);
1071 iv_paddr = c->dest_final - rctx->walk.blocksize;
1072 list_del(&c->entry);
1073 if (unlikely(c != &rctx->chunk))
1076 final_iv_addr = __va(iv_paddr);
1078 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1080 if (c == &rctx->chunk) {
1081 iv_paddr = __pa(rctx->walk.iv);
1083 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1084 tmp->arr[tmp->arr_len-1].src_len -
1085 rctx->walk.blocksize);
1087 if (!final_iv_addr) {
1090 pa = (c->arr[c->arr_len-1].src_paddr +
1091 c->arr[c->arr_len-1].src_len -
1092 rctx->walk.blocksize);
1093 final_iv_addr = rctx->temp_iv;
1094 memcpy(rctx->temp_iv, __va(pa),
1095 rctx->walk.blocksize);
1097 c->iv_paddr = iv_paddr;
1098 err = __n2_crypt_chunk(tfm, c, qp, false);
1101 list_del(&c->entry);
1102 if (unlikely(c != &rctx->chunk))
1107 hv_ret = wait_for_tail(qp);
1108 if (hv_ret != HV_EOK)
1112 spin_unlock_irqrestore(&qp->lock, flags);
1117 n2_chunk_complete(req, err ? NULL : final_iv_addr);
1121 static int n2_encrypt_chaining(struct ablkcipher_request *req)
1123 return n2_do_chaining(req, true);
1126 static int n2_decrypt_chaining(struct ablkcipher_request *req)
1128 return n2_do_chaining(req, false);
1131 struct n2_cipher_tmpl {
1133 const char *drv_name;
1136 struct ablkcipher_alg ablkcipher;
1139 static const struct n2_cipher_tmpl cipher_tmpls[] = {
1140 /* ARC4: only ECB is supported (chaining bits ignored) */
1141 { .name = "ecb(arc4)",
1142 .drv_name = "ecb-arc4",
1144 .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
1145 ENC_TYPE_CHAINING_ECB),
1149 .setkey = n2_arc4_setkey,
1150 .encrypt = n2_encrypt_ecb,
1151 .decrypt = n2_decrypt_ecb,
1155 /* DES: ECB, CBC and CFB are supported */
1156 { .name = "ecb(des)",
1157 .drv_name = "ecb-des",
1158 .block_size = DES_BLOCK_SIZE,
1159 .enc_type = (ENC_TYPE_ALG_DES |
1160 ENC_TYPE_CHAINING_ECB),
1162 .min_keysize = DES_KEY_SIZE,
1163 .max_keysize = DES_KEY_SIZE,
1164 .setkey = n2_des_setkey,
1165 .encrypt = n2_encrypt_ecb,
1166 .decrypt = n2_decrypt_ecb,
1169 { .name = "cbc(des)",
1170 .drv_name = "cbc-des",
1171 .block_size = DES_BLOCK_SIZE,
1172 .enc_type = (ENC_TYPE_ALG_DES |
1173 ENC_TYPE_CHAINING_CBC),
1175 .ivsize = DES_BLOCK_SIZE,
1176 .min_keysize = DES_KEY_SIZE,
1177 .max_keysize = DES_KEY_SIZE,
1178 .setkey = n2_des_setkey,
1179 .encrypt = n2_encrypt_chaining,
1180 .decrypt = n2_decrypt_chaining,
1183 { .name = "cfb(des)",
1184 .drv_name = "cfb-des",
1185 .block_size = DES_BLOCK_SIZE,
1186 .enc_type = (ENC_TYPE_ALG_DES |
1187 ENC_TYPE_CHAINING_CFB),
1189 .min_keysize = DES_KEY_SIZE,
1190 .max_keysize = DES_KEY_SIZE,
1191 .setkey = n2_des_setkey,
1192 .encrypt = n2_encrypt_chaining,
1193 .decrypt = n2_decrypt_chaining,
1197 /* 3DES: ECB, CBC and CFB are supported */
1198 { .name = "ecb(des3_ede)",
1199 .drv_name = "ecb-3des",
1200 .block_size = DES_BLOCK_SIZE,
1201 .enc_type = (ENC_TYPE_ALG_3DES |
1202 ENC_TYPE_CHAINING_ECB),
1204 .min_keysize = 3 * DES_KEY_SIZE,
1205 .max_keysize = 3 * DES_KEY_SIZE,
1206 .setkey = n2_3des_setkey,
1207 .encrypt = n2_encrypt_ecb,
1208 .decrypt = n2_decrypt_ecb,
1211 { .name = "cbc(des3_ede)",
1212 .drv_name = "cbc-3des",
1213 .block_size = DES_BLOCK_SIZE,
1214 .enc_type = (ENC_TYPE_ALG_3DES |
1215 ENC_TYPE_CHAINING_CBC),
1217 .ivsize = DES_BLOCK_SIZE,
1218 .min_keysize = 3 * DES_KEY_SIZE,
1219 .max_keysize = 3 * DES_KEY_SIZE,
1220 .setkey = n2_3des_setkey,
1221 .encrypt = n2_encrypt_chaining,
1222 .decrypt = n2_decrypt_chaining,
1225 { .name = "cfb(des3_ede)",
1226 .drv_name = "cfb-3des",
1227 .block_size = DES_BLOCK_SIZE,
1228 .enc_type = (ENC_TYPE_ALG_3DES |
1229 ENC_TYPE_CHAINING_CFB),
1231 .min_keysize = 3 * DES_KEY_SIZE,
1232 .max_keysize = 3 * DES_KEY_SIZE,
1233 .setkey = n2_3des_setkey,
1234 .encrypt = n2_encrypt_chaining,
1235 .decrypt = n2_decrypt_chaining,
1238 /* AES: ECB, CBC and CTR are supported */
1239 { .name = "ecb(aes)",
1240 .drv_name = "ecb-aes",
1241 .block_size = AES_BLOCK_SIZE,
1242 .enc_type = (ENC_TYPE_ALG_AES128 |
1243 ENC_TYPE_CHAINING_ECB),
1245 .min_keysize = AES_MIN_KEY_SIZE,
1246 .max_keysize = AES_MAX_KEY_SIZE,
1247 .setkey = n2_aes_setkey,
1248 .encrypt = n2_encrypt_ecb,
1249 .decrypt = n2_decrypt_ecb,
1252 { .name = "cbc(aes)",
1253 .drv_name = "cbc-aes",
1254 .block_size = AES_BLOCK_SIZE,
1255 .enc_type = (ENC_TYPE_ALG_AES128 |
1256 ENC_TYPE_CHAINING_CBC),
1258 .ivsize = AES_BLOCK_SIZE,
1259 .min_keysize = AES_MIN_KEY_SIZE,
1260 .max_keysize = AES_MAX_KEY_SIZE,
1261 .setkey = n2_aes_setkey,
1262 .encrypt = n2_encrypt_chaining,
1263 .decrypt = n2_decrypt_chaining,
1266 { .name = "ctr(aes)",
1267 .drv_name = "ctr-aes",
1268 .block_size = AES_BLOCK_SIZE,
1269 .enc_type = (ENC_TYPE_ALG_AES128 |
1270 ENC_TYPE_CHAINING_COUNTER),
1272 .ivsize = AES_BLOCK_SIZE,
1273 .min_keysize = AES_MIN_KEY_SIZE,
1274 .max_keysize = AES_MAX_KEY_SIZE,
1275 .setkey = n2_aes_setkey,
1276 .encrypt = n2_encrypt_chaining,
1277 .decrypt = n2_encrypt_chaining, /* CTR decryption is the same keystream XOR as encryption */
1282 #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
1284 static LIST_HEAD(cipher_algs);
1286 struct n2_hash_tmpl {
1288 const u8 *hash_zero;
1289 const u32 *hash_init;
1297 static const u32 md5_init[MD5_HASH_WORDS] = {
1298 cpu_to_le32(MD5_H0),
1299 cpu_to_le32(MD5_H1),
1300 cpu_to_le32(MD5_H2),
1301 cpu_to_le32(MD5_H3),
1303 static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
1304 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1306 static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
1307 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1308 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1310 static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
1311 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1312 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1315 static const struct n2_hash_tmpl hash_tmpls[] = {
1317 .hash_zero = md5_zero_message_hash,
1318 .hash_init = md5_init,
1319 .auth_type = AUTH_TYPE_MD5,
1320 .hmac_type = AUTH_TYPE_HMAC_MD5,
1321 .hw_op_hashsz = MD5_DIGEST_SIZE,
1322 .digest_size = MD5_DIGEST_SIZE,
1323 .block_size = MD5_HMAC_BLOCK_SIZE },
1325 .hash_zero = sha1_zero_message_hash,
1326 .hash_init = sha1_init,
1327 .auth_type = AUTH_TYPE_SHA1,
1328 .hmac_type = AUTH_TYPE_HMAC_SHA1,
1329 .hw_op_hashsz = SHA1_DIGEST_SIZE,
1330 .digest_size = SHA1_DIGEST_SIZE,
1331 .block_size = SHA1_BLOCK_SIZE },
1333 .hash_zero = sha256_zero_message_hash,
1334 .hash_init = sha256_init,
1335 .auth_type = AUTH_TYPE_SHA256,
1336 .hmac_type = AUTH_TYPE_HMAC_SHA256,
1337 .hw_op_hashsz = SHA256_DIGEST_SIZE,
1338 .digest_size = SHA256_DIGEST_SIZE,
1339 .block_size = SHA256_BLOCK_SIZE },
1341 .hash_zero = sha224_zero_message_hash,
1342 .hash_init = sha224_init,
1343 .auth_type = AUTH_TYPE_SHA256,
1344 .hmac_type = AUTH_TYPE_RESERVED,
1345 .hw_op_hashsz = SHA256_DIGEST_SIZE,
1346 .digest_size = SHA224_DIGEST_SIZE,
1347 .block_size = SHA224_BLOCK_SIZE },
1349 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1351 static LIST_HEAD(ahash_algs);
1352 static LIST_HEAD(hmac_algs);
1354 static int algs_registered;
1356 static void __n2_unregister_algs(void)
1358 struct n2_cipher_alg *cipher, *cipher_tmp;
1359 struct n2_ahash_alg *alg, *alg_tmp;
1360 struct n2_hmac_alg *hmac, *hmac_tmp;
1362 list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1363 crypto_unregister_alg(&cipher->alg);
1364 list_del(&cipher->entry);
1367 list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1368 crypto_unregister_ahash(&hmac->derived.alg);
1369 list_del(&hmac->derived.entry);
1372 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1373 crypto_unregister_ahash(&alg->alg);
1374 list_del(&alg->entry);
1379 static int n2_cipher_cra_init(struct crypto_tfm *tfm)
1381 tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
1385 static int __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
1387 struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1388 struct crypto_alg *alg;
1396 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1397 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1398 alg->cra_priority = N2_CRA_PRIORITY;
1399 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
1400 CRYPTO_ALG_KERN_DRIVER_ONLY | CRYPTO_ALG_ASYNC;
1401 alg->cra_blocksize = tmpl->block_size;
1402 p->enc_type = tmpl->enc_type;
1403 alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1404 alg->cra_type = &crypto_ablkcipher_type;
1405 alg->cra_u.ablkcipher = tmpl->ablkcipher;
1406 alg->cra_init = n2_cipher_cra_init;
1407 alg->cra_module = THIS_MODULE;
1409 list_add(&p->entry, &cipher_algs);
1410 err = crypto_register_alg(alg);
1412 pr_err("%s alg registration failed\n", alg->cra_name);
1413 list_del(&p->entry);
1416 pr_info("%s alg registered\n", alg->cra_name);
1421 static int __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1423 struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1424 struct ahash_alg *ahash;
1425 struct crypto_alg *base;
1431 p->child_alg = n2ahash->alg.halg.base.cra_name;
1432 memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1433 INIT_LIST_HEAD(&p->derived.entry);
1435 ahash = &p->derived.alg;
1436 ahash->digest = n2_hmac_async_digest;
1437 ahash->setkey = n2_hmac_async_setkey;
1439 base = &ahash->halg.base;
1440 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1441 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1443 base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1444 base->cra_init = n2_hmac_cra_init;
1445 base->cra_exit = n2_hmac_cra_exit;
1447 list_add(&p->derived.entry, &hmac_algs);
1448 err = crypto_register_ahash(ahash);
1450 pr_err("%s alg registration failed\n", base->cra_name);
1451 list_del(&p->derived.entry);
1454 pr_info("%s alg registered\n", base->cra_name);
1459 static int __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1461 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1462 struct hash_alg_common *halg;
1463 struct crypto_alg *base;
1464 struct ahash_alg *ahash;
1470 p->hash_zero = tmpl->hash_zero;
1471 p->hash_init = tmpl->hash_init;
1472 p->auth_type = tmpl->auth_type;
1473 p->hmac_type = tmpl->hmac_type;
1474 p->hw_op_hashsz = tmpl->hw_op_hashsz;
1475 p->digest_size = tmpl->digest_size;
1478 ahash->init = n2_hash_async_init;
1479 ahash->update = n2_hash_async_update;
1480 ahash->final = n2_hash_async_final;
1481 ahash->finup = n2_hash_async_finup;
1482 ahash->digest = n2_hash_async_digest;
1483 ahash->export = n2_hash_async_noexport;
1484 ahash->import = n2_hash_async_noimport;
1486 halg = &ahash->halg;
1487 halg->digestsize = tmpl->digest_size;
1490 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1491 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1492 base->cra_priority = N2_CRA_PRIORITY;
1493 base->cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
1494 CRYPTO_ALG_NEED_FALLBACK;
1495 base->cra_blocksize = tmpl->block_size;
1496 base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1497 base->cra_module = THIS_MODULE;
1498 base->cra_init = n2_hash_cra_init;
1499 base->cra_exit = n2_hash_cra_exit;
1501 list_add(&p->entry, &ahash_algs);
1502 err = crypto_register_ahash(ahash);
1504 pr_err("%s alg registration failed\n", base->cra_name);
1505 list_del(&p->entry);
1508 pr_info("%s alg registered\n", base->cra_name);
1510 if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1511 err = __n2_register_one_hmac(p);
1515 static int n2_register_algs(void)
1519 mutex_lock(&spu_lock);
1520 if (algs_registered++)
1523 for (i = 0; i < NUM_HASH_TMPLS; i++) {
1524 err = __n2_register_one_ahash(&hash_tmpls[i]);
1526 __n2_unregister_algs();
1530 for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1531 err = __n2_register_one_cipher(&cipher_tmpls[i]);
1533 __n2_unregister_algs();
1539 mutex_unlock(&spu_lock);
1543 static void n2_unregister_algs(void)
1545 mutex_lock(&spu_lock);
1546 if (!--algs_registered)
1547 __n2_unregister_algs();
1548 mutex_unlock(&spu_lock);
1551 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1552 * a devino. This isn't very useful to us because all of the
1553 * interrupts listed in the device_node have been translated to
1554 * Linux virtual IRQ cookie numbers.
1556 * So we have to back-translate, going through the 'intr' and 'ino'
1557 * property tables of the n2cp MDESC node, matching it with the OF
1558 * 'interrupts' property entries, in order to figure out which
1559 * devino goes to which already-translated IRQ.
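 *
 * Roughly: qhandle -> devino (sun4v_ncs_qhandle_to_devino()) -> matching
 * entry in the MDESC 'ino'/'intr' tables -> matching position in the OF
 * 'interrupts' property -> dev->archdata.irqs[index].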
1561 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1562 unsigned long dev_ino)
1564 const unsigned int *dev_intrs;
1568 for (i = 0; i < ip->num_intrs; i++) {
1569 if (ip->ino_table[i].ino == dev_ino)
1572 if (i == ip->num_intrs)
1575 intr = ip->ino_table[i].intr;
1577 dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1581 for (i = 0; i < dev->archdata.num_irqs; i++) {
1582 if (dev_intrs[i] == intr)
1589 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1590 const char *irq_name, struct spu_queue *p,
1591 irq_handler_t handler)
1596 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1600 index = find_devino_index(dev, ip, p->devino);
1604 p->irq = dev->archdata.irqs[index];
1606 sprintf(p->irq_name, "%s-%d", irq_name, index);
1608 return request_irq(p->irq, handler, 0, p->irq_name, p);
1611 static struct kmem_cache *queue_cache[2];
1613 static void *new_queue(unsigned long q_type)
1615 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1618 static void free_queue(void *p, unsigned long q_type)
1620 kmem_cache_free(queue_cache[q_type - 1], p);
1623 static int queue_cache_init(void)
1625 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1626 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1627 kmem_cache_create("mau_queue",
1630 MAU_ENTRY_SIZE, 0, NULL);
1631 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1634 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1635 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1636 kmem_cache_create("cwq_queue",
1639 CWQ_ENTRY_SIZE, 0, NULL);
1640 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1641 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1642 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1648 static void queue_cache_destroy(void)
1650 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1651 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
1652 queue_cache[HV_NCS_QTYPE_MAU - 1] = NULL;
1653 queue_cache[HV_NCS_QTYPE_CWQ - 1] = NULL;
1656 static long spu_queue_register_workfn(void *arg)
1658 struct spu_qreg *qr = arg;
1659 struct spu_queue *p = qr->queue;
1660 unsigned long q_type = qr->type;
1661 unsigned long hv_ret;
1663 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1664 CWQ_NUM_ENTRIES, &p->qhandle);
1666 sun4v_ncs_sethead_marker(p->qhandle, 0);
1668 return hv_ret ? -EINVAL : 0;
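/* The qconf hypercall is issued via work_on_cpu_safe() on a CPU taken from
 * the queue's sharing mask, presumably because queue configuration has to be
 * performed from a CPU that is actually attached to this SPU.
 */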
1671 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1673 int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
1674 struct spu_qreg qr = { .queue = p, .type = q_type };
1676 return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
1679 static int spu_queue_setup(struct spu_queue *p)
1683 p->q = new_queue(p->q_type);
1687 err = spu_queue_register(p, p->q_type);
1689 free_queue(p->q, p->q_type);
1696 static void spu_queue_destroy(struct spu_queue *p)
1698 unsigned long hv_ret;
1703 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1706 free_queue(p->q, p->q_type);
1709 static void spu_list_destroy(struct list_head *list)
1711 struct spu_queue *p, *n;
1713 list_for_each_entry_safe(p, n, list, list) {
1716 for (i = 0; i < NR_CPUS; i++) {
1717 if (cpu_to_cwq[i] == p)
1718 cpu_to_cwq[i] = NULL;
1722 free_irq(p->irq, p);
1725 spu_queue_destroy(p);
1731 /* Walk the backward arcs of an 'exec-unit' (CWQ or MAU) node,
1732 * gathering cpu membership information.
1734 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1735 struct platform_device *dev,
1736 u64 node, struct spu_queue *p,
1737 struct spu_queue **table)
1741 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1742 u64 tgt = mdesc_arc_target(mdesc, arc);
1743 const char *name = mdesc_node_name(mdesc, tgt);
1746 if (strcmp(name, "cpu"))
1748 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1749 if (table[*id] != NULL) {
1750 dev_err(&dev->dev, "%pOF: SPU cpu slot already set.\n",
1754 cpumask_set_cpu(*id, &p->sharing);
1760 /* Process an 'exec-unit' MDESC node of the requested type ('cwq' or 'mau'). */
1761 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1762 struct platform_device *dev, struct mdesc_handle *mdesc,
1763 u64 node, const char *iname, unsigned long q_type,
1764 irq_handler_t handler, struct spu_queue **table)
1766 struct spu_queue *p;
1769 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1771 dev_err(&dev->dev, "%pOF: Could not allocate SPU queue.\n",
1776 cpumask_clear(&p->sharing);
1777 spin_lock_init(&p->lock);
1779 INIT_LIST_HEAD(&p->jobs);
1780 list_add(&p->list, list);
1782 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1786 err = spu_queue_setup(p);
1790 return spu_map_ino(dev, ip, iname, p, handler);
1793 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1794 struct spu_mdesc_info *ip, struct list_head *list,
1795 const char *exec_name, unsigned long q_type,
1796 irq_handler_t handler, struct spu_queue **table)
1801 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1804 type = mdesc_get_property(mdesc, node, "type", NULL);
1805 if (!type || strcmp(type, exec_name))
1808 err = handle_exec_unit(ip, list, dev, mdesc, node,
1809 exec_name, q_type, handler, table);
1811 spu_list_destroy(list);
1819 static int get_irq_props(struct mdesc_handle *mdesc, u64 node,
1820 struct spu_mdesc_info *ip)
1826 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1828 pr_err("NO 'ino'\n");
1832 ip->num_intrs = ino_len / sizeof(u64);
1833 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1839 for (i = 0; i < ip->num_intrs; i++) {
1840 struct ino_blob *b = &ip->ino_table[i];
1848 static int grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1849 struct platform_device *dev,
1850 struct spu_mdesc_info *ip,
1851 const char *node_name)
1853 const unsigned int *reg;
1856 reg = of_get_property(dev->dev.of_node, "reg", NULL);
1860 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1864 name = mdesc_get_property(mdesc, node, "name", NULL);
1865 if (!name || strcmp(name, node_name))
1867 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1868 if (!chdl || (*chdl != *reg))
1870 ip->cfg_handle = *chdl;
1871 return get_irq_props(mdesc, node, ip);
1877 static unsigned long n2_spu_hvapi_major;
1878 static unsigned long n2_spu_hvapi_minor;
1880 static int n2_spu_hvapi_register(void)
1884 n2_spu_hvapi_major = 2;
1885 n2_spu_hvapi_minor = 0;
1887 err = sun4v_hvapi_register(HV_GRP_NCS,
1889 &n2_spu_hvapi_minor);
1892 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1894 n2_spu_hvapi_minor);
1899 static void n2_spu_hvapi_unregister(void)
1901 sun4v_hvapi_unregister(HV_GRP_NCS);
1904 static int global_ref;
1906 static int grab_global_resources(void)
1910 mutex_lock(&spu_lock);
1915 err = n2_spu_hvapi_register();
1919 err = queue_cache_init();
1921 goto out_hvapi_release;
1924 cpu_to_cwq = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1927 goto out_queue_cache_destroy;
1929 cpu_to_mau = kcalloc(NR_CPUS, sizeof(struct spu_queue *),
1932 goto out_free_cwq_table;
1939 mutex_unlock(&spu_lock);
1946 out_queue_cache_destroy:
1947 queue_cache_destroy();
1950 n2_spu_hvapi_unregister();
1954 static void release_global_resources(void)
1956 mutex_lock(&spu_lock);
1957 if (!--global_ref) {
1964 queue_cache_destroy();
1965 n2_spu_hvapi_unregister();
1967 mutex_unlock(&spu_lock);
1970 static struct n2_crypto *alloc_n2cp(void)
1972 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1975 INIT_LIST_HEAD(&np->cwq_list);
1980 static void free_n2cp(struct n2_crypto *np)
1982 kfree(np->cwq_info.ino_table);
1983 np->cwq_info.ino_table = NULL;
1988 static void n2_spu_driver_version(void)
1990 static int n2_spu_version_printed;
1992 if (n2_spu_version_printed++ == 0)
1993 pr_info("%s", version);
1996 static int n2_crypto_probe(struct platform_device *dev)
1998 struct mdesc_handle *mdesc;
1999 struct n2_crypto *np;
2002 n2_spu_driver_version();
2004 pr_info("Found N2CP at %pOF\n", dev->dev.of_node);
2008 dev_err(&dev->dev, "%pOF: Unable to allocate n2cp.\n",
2013 err = grab_global_resources();
2015 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2020 mdesc = mdesc_grab();
2023 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2026 goto out_free_global;
2028 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
2030 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2032 mdesc_release(mdesc);
2033 goto out_free_global;
2036 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
2037 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
2039 mdesc_release(mdesc);
2042 dev_err(&dev->dev, "%pOF: CWQ MDESC scan failed.\n",
2044 goto out_free_global;
2047 err = n2_register_algs();
2049 dev_err(&dev->dev, "%pOF: Unable to register algorithms.\n",
2051 goto out_free_spu_list;
2054 dev_set_drvdata(&dev->dev, np);
2059 spu_list_destroy(&np->cwq_list);
2062 release_global_resources();
2070 static int n2_crypto_remove(struct platform_device *dev)
2072 struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2074 n2_unregister_algs();
2076 spu_list_destroy(&np->cwq_list);
2078 release_global_resources();
2085 static struct n2_mau *alloc_ncp(void)
2087 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2090 INIT_LIST_HEAD(&mp->mau_list);
2095 static void free_ncp(struct n2_mau *mp)
2097 kfree(mp->mau_info.ino_table);
2098 mp->mau_info.ino_table = NULL;
2103 static int n2_mau_probe(struct platform_device *dev)
2105 struct mdesc_handle *mdesc;
2109 n2_spu_driver_version();
2111 pr_info("Found NCP at %pOF\n", dev->dev.of_node);
2115 dev_err(&dev->dev, "%pOF: Unable to allocate ncp.\n",
2120 err = grab_global_resources();
2122 dev_err(&dev->dev, "%pOF: Unable to grab global resources.\n",
2127 mdesc = mdesc_grab();
2130 dev_err(&dev->dev, "%pOF: Unable to grab MDESC.\n",
2133 goto out_free_global;
2136 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2138 dev_err(&dev->dev, "%pOF: Unable to grab IRQ props.\n",
2140 mdesc_release(mdesc);
2141 goto out_free_global;
2144 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2145 "mau", HV_NCS_QTYPE_MAU, mau_intr,
2147 mdesc_release(mdesc);
2150 dev_err(&dev->dev, "%pOF: MAU MDESC scan failed.\n",
2152 goto out_free_global;
2155 dev_set_drvdata(&dev->dev, mp);
2160 release_global_resources();
2168 static int n2_mau_remove(struct platform_device *dev)
2170 struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2172 spu_list_destroy(&mp->mau_list);
2174 release_global_resources();
2181 static const struct of_device_id n2_crypto_match[] = {
2184 .compatible = "SUNW,n2-cwq",
2188 .compatible = "SUNW,vf-cwq",
2192 .compatible = "SUNW,kt-cwq",
2197 MODULE_DEVICE_TABLE(of, n2_crypto_match);
2199 static struct platform_driver n2_crypto_driver = {
2202 .of_match_table = n2_crypto_match,
2204 .probe = n2_crypto_probe,
2205 .remove = n2_crypto_remove,
2208 static const struct of_device_id n2_mau_match[] = {
2211 .compatible = "SUNW,n2-mau",
2215 .compatible = "SUNW,vf-mau",
2219 .compatible = "SUNW,kt-mau",
2224 MODULE_DEVICE_TABLE(of, n2_mau_match);
2226 static struct platform_driver n2_mau_driver = {
2229 .of_match_table = n2_mau_match,
2231 .probe = n2_mau_probe,
2232 .remove = n2_mau_remove,
2235 static struct platform_driver * const drivers[] = {
2240 static int __init n2_init(void)
2242 return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
2245 static void __exit n2_exit(void)
2247 platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
2250 module_init(n2_init);
2251 module_exit(n2_exit);