3 * sep_crypto.c - Crypto interface structures
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2010 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.02.22 Enable Kernel Crypto
35 #include <linux/module.h>
36 #include <linux/miscdevice.h>
38 #include <linux/cdev.h>
39 #include <linux/kdev_t.h>
40 #include <linux/mutex.h>
41 #include <linux/sched.h>
43 #include <linux/poll.h>
44 #include <linux/wait.h>
45 #include <linux/pci.h>
46 #include <linux/pm_runtime.h>
47 #include <linux/err.h>
48 #include <linux/device.h>
49 #include <linux/errno.h>
50 #include <linux/interrupt.h>
51 #include <linux/kernel.h>
52 #include <linux/clk.h>
53 #include <linux/irq.h>
55 #include <linux/platform_device.h>
56 #include <linux/list.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/delay.h>
59 #include <linux/jiffies.h>
60 #include <linux/workqueue.h>
61 #include <linux/crypto.h>
62 #include <crypto/internal/hash.h>
63 #include <crypto/scatterwalk.h>
64 #include <crypto/sha.h>
65 #include <crypto/md5.h>
66 #include <crypto/aes.h>
67 #include <crypto/des.h>
68 #include <crypto/hash.h>
69 #include "sep_driver_hw_defs.h"
70 #include "sep_driver_config.h"
71 #include "sep_driver_api.h"
73 #include "sep_crypto.h"
75 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
77 /* Globals for queuing */
78 static spinlock_t queue_lock;
79 static struct crypto_queue sep_queue;
81 /* Declaration of the dequeuer */
82 static void sep_dequeuer(void *data);
87 * @work: pointer to work_struct
88 * This is what is called by the queue; it is generic so that it
89 * can be used by any type of operation as each different callback
90 * function can use the data parameter in its own way
92 static void sep_do_callback(struct work_struct *work)
94 struct sep_work_struct *sep_work = container_of(work,
95 struct sep_work_struct, work);
97 if (sep_work != NULL) {
98 (sep_work->callback)(sep_work->data);
99 kfree(sep_work);
100 } else {
101 pr_debug("sep crypto: do callback - NULL container\n");
102 }
107 * @work_queue: pointer to struct_workqueue
108 * @funct: pointer to function to execute
109 * @data: pointer to data; function will know
111 * This is a generic API to submit something to
112 * the queue. The callback function will depend
113 * on what operation is to be done
115 static int sep_submit_work(struct workqueue_struct *work_queue,
116 void (*funct)(void *),
119 struct sep_work_struct *sep_work;
122 sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
124 if (sep_work == NULL) {
125 pr_debug("sep crypto: can't allocate work structure\n");
126 return -ENOMEM;
127 }
129 sep_work->callback = funct;
130 sep_work->data = data;
131 INIT_WORK(&sep_work->work, sep_do_callback);
132 result = queue_work(work_queue, &sep_work->work);
133 if (!result) {
134 pr_debug("sep_crypto: queue_work failed\n");
135 return -EINVAL;
136 }
137 return 0;
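/*
 * Illustrative note (not part of the original driver flow): the
 * expected call pattern, as used by the hash-update path later in
 * this file, is to enqueue a crypto request and then submit the
 * dequeuer:
 *
 *	spin_lock_irq(&queue_lock);
 *	error = crypto_enqueue_request(&sep_queue, &req->base);
 *	spin_unlock_irq(&queue_lock);
 *	error = sep_submit_work(ta_ctx->sep_used->workqueue,
 *		sep_dequeuer, (void *)&sep_queue);
 *
 * sep_do_callback() then invokes sep_dequeuer(&sep_queue) from
 * workqueue context, where sleeping is allowed.
 */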
142 * @sep: pointer to struct sep_device
143 * @size: total size of area
144 * @block_size: minimum size of chunks
145 * each page is sized to a multiple of this (the last may be shorter)
146 * @returns: pointer to struct scatterlist for the new
147 * allocated pages; NULL on failure
149 static struct scatterlist *sep_alloc_sg_buf(
150 struct sep_device *sep,
158 size_t real_page_size;
160 struct scatterlist *sg, *sg_temp;
165 dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
169 real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
171 * The size of each page must be a multiple of the operation
172 * block size; increment by the modified page size until
173 * the total size is reached; that gives the number of
174 * pages needed. */
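/*
 * Worked example (illustrative only): with PAGE_SIZE = 4096 and an
 * AES block_size of 16, real_page_size = 4096 - (4096 % 16) = 4096.
 * For a hypothetical block_size of 24, real_page_size would be
 * 4096 - (4096 % 24) = 4080, so a request of size 5000 would need
 * two pages (4080 + 920).
 */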
176 while (current_size < size) {
177 current_size += real_page_size;
181 sg = kmalloc_array(nbr_pages, sizeof(struct scatterlist), GFP_ATOMIC);
185 sg_init_table(sg, nbr_pages);
189 for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
190 buf = (void *)get_zeroed_page(GFP_ATOMIC);
191 if (!buf) {
192 dev_warn(&sep->pdev->dev,
193 "Cannot allocate page for new buffer\n");
194 kfree(sg);
195 return NULL;
196 }
198 sg_set_buf(sg_temp, buf, real_page_size);
199 if ((size - current_size) > real_page_size) {
200 sg_temp->length = real_page_size;
201 current_size += real_page_size;
202 } else {
203 sg_temp->length = (size - current_size);
204 current_size = size;
205 }
206 sg_temp = sg_next(sg);
213 * @sg: pointer to struct scatterlist; points to area to free
215 static void sep_free_sg_buf(struct scatterlist *sg)
217 struct scatterlist *sg_temp = sg;
218 while (sg_temp) {
219 free_page((unsigned long)sg_virt(sg_temp));
220 sg_temp = sg_next(sg_temp);
221 }
222 kfree(sg);
227 * @sep: pointer to struct sep_device
228 * @sg_src: pointer to struct scatterlist for source
229 * @sg_dst: pointer to struct scatterlist for destination
230 * @size: size (in bytes) of data to copy
232 * Copy data from one scatterlist to another; both must
235 static void sep_copy_sg(
236 struct sep_device *sep,
237 struct scatterlist *sg_src,
238 struct scatterlist *sg_dst,
242 u32 in_offset, out_offset;
245 struct scatterlist *sg_src_tmp = sg_src;
246 struct scatterlist *sg_dst_tmp = sg_dst;
250 dev_dbg(&sep->pdev->dev, "sep copy sg\n");
252 if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
253 return;
255 dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
257 while (count < size) {
258 if ((sg_src_tmp->length - in_offset) >
259 (sg_dst_tmp->length - out_offset))
260 seg_size = sg_dst_tmp->length - out_offset;
262 seg_size = sg_src_tmp->length - in_offset;
264 if (seg_size > (size - count))
265 seg_size = size - count;
267 memcpy(sg_virt(sg_dst_tmp) + out_offset,
268 sg_virt(sg_src_tmp) + in_offset,
271 in_offset += seg_size;
272 out_offset += seg_size;
275 if (in_offset >= sg_src_tmp->length) {
276 sg_src_tmp = sg_next(sg_src_tmp);
280 if (out_offset >= sg_dst_tmp->length) {
281 sg_dst_tmp = sg_next(sg_dst_tmp);
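/*
 * Illustrative example (not in the original source): copying 6000
 * bytes from source segments [4096, 1904] to destination segments
 * [2048, 2048, 1904] proceeds in chunks of
 * min(src remaining, dst remaining, size - count): 2048, then 2048,
 * then 1904, advancing the offsets within each segment and taking
 * sg_next() whenever a segment is exhausted.
 */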
288 * sep_oddball_pages -
289 * @sep: pointer to struct sep_device
290 * @sg: pointer to struct scatterlist - buffer to check
291 * @size: total data size
292 * @blocksize: minimum block size; must be multiples of this size
293 * @to_copy: 1 means do copy, 0 means do not copy
294 * @new_sg: pointer to location to put pointer to new sg area
295 * @returns: 1 if new scatterlist is needed; 0 if not needed;
296 * error value if operation failed
298 * The SEP device requires all pages to be multiples of the
299 * minimum block size appropriate for the operation
300 * This function checks all pages; if any are oddball sizes
301 * (not a multiple of the block size), it creates a new scatterlist.
302 * If the to_copy parameter is set to 1, then a scatter list
303 * copy is performed. The pointer to the new scatterlist is
304 * put into the address supplied by the new_sg parameter; if
305 * no new scatterlist is needed, then a NULL is put into
306 * the location at new_sg.
309 static int sep_oddball_pages(
310 struct sep_device *sep,
311 struct scatterlist *sg,
314 struct scatterlist **new_sg,
317 struct scatterlist *sg_temp;
319 u32 nbr_pages, page_count;
321 dev_dbg(&sep->pdev->dev, "sep oddball\n");
322 if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
323 return 0;
325 dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
333 sg_temp = sg_next(sg_temp);
337 while ((sg_temp) && (flag == 0)) {
339 if (sg_temp->length % block_size)
342 sg_temp = sg_next(sg_temp);
345 /* Do not process if last (or only) page is oddball */
346 if (nbr_pages == page_count)
350 dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
351 *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
352 if (*new_sg == NULL) {
353 dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
358 sep_copy_sg(sep, sg, *new_sg, data_size);
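/*
 * Illustrative caller pattern (mirroring the block-data path later in
 * this file) for the tri-state return value:
 *
 *	int_error = sep_oddball_pages(sep, req->src, req->nbytes,
 *		blocksize, &new_sg, 1);
 *	if (int_error < 0)
 *		return int_error;
 *	else if (int_error == 1)
 *		src_sg = new_sg;	(use, and later free, the copy)
 *	else
 *		src_sg = req->src;	(original list was usable as-is)
 */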
367 * sep_copy_offset_sg -
368 * @sep: pointer to struct sep_device;
369 * @sg: pointer to struct scatterlist
370 * @offset: offset into scatterlist memory
371 * @dst: place to put data
372 * @len: length of data
373 * @returns: number of bytes copied
375 * This copies data from a scatterlist buffer at an
376 * offset from its beginning; it is needed for
377 * handling the tail data in hash operations
379 static size_t sep_copy_offset_sg(
380 struct sep_device *sep,
381 struct scatterlist *sg,
388 size_t offset_within_page;
389 size_t length_within_page;
390 size_t length_remaining;
391 size_t current_offset;
393 /* Find which page is beginning of segment */
395 page_end = sg->length;
396 while ((sg) && (offset > page_end)) {
397 page_start += sg->length;
400 page_end += sg->length;
406 offset_within_page = offset - page_start;
407 if ((sg->length - offset_within_page) >= len) {
408 /* All within this page */
409 memcpy(dst, sg_virt(sg) + offset_within_page, len);
412 /* Scattered multiple pages */
414 length_remaining = len;
415 while ((sg) && (current_offset < len)) {
416 length_within_page = sg->length - offset_within_page;
417 if (length_within_page >= length_remaining) {
418 memcpy(dst+current_offset,
419 sg_virt(sg) + offset_within_page,
421 length_remaining = 0;
422 current_offset = len;
424 memcpy(dst+current_offset,
425 sg_virt(sg) + offset_within_page,
427 length_remaining -= length_within_page;
428 current_offset += length_within_page;
429 offset_within_page = 0;
442 * @src_ptr: source pointer
443 * @dst_ptr: destination pointer
444 * @nbytes: number of bytes
445 * @returns: 0 for success; -1 for failure
446 * We cannot have any partial overlap. Total overlap
447 * where src is the same as dst is okay
449 static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
451 /* Check for partial overlap */
452 if (src_ptr != dst_ptr) {
453 if (src_ptr < dst_ptr) {
454 if ((src_ptr + nbytes) > dst_ptr)
457 if ((dst_ptr + nbytes) > src_ptr)
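/*
 * Examples (illustrative, per the contract above) with nbytes = 16:
 *	src == dst                  -> 0  (total overlap is allowed)
 *	src = 0x1000, dst = 0x1008  -> -1 (src + 16 reaches past dst)
 *	src = 0x1000, dst = 0x1010  -> 0  (ranges touch but do not overlap)
 */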
465 /* Debug - prints only if DEBUG is defined */
466 static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
470 struct sep_aes_internal_context *aes_internal;
471 struct sep_des_internal_context *des_internal;
474 struct this_task_ctx *ta_ctx;
475 struct crypto_ablkcipher *tfm;
476 struct sep_system_ctx *sctx;
478 ta_ctx = ablkcipher_request_ctx(req);
479 tfm = crypto_ablkcipher_reqtfm(req);
480 sctx = crypto_ablkcipher_ctx(tfm);
482 dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
483 if ((ta_ctx->current_request == DES_CBC) &&
484 (ta_ctx->des_opmode == SEP_DES_CBC)) {
486 des_internal = (struct sep_des_internal_context *)
487 sctx->des_private_ctx.ctx_buf;
489 dev_dbg(&ta_ctx->sep_used->pdev->dev,
490 "sep - vendor iv for DES\n");
491 cptr = (unsigned char *)des_internal->iv_context;
492 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
493 dev_dbg(&ta_ctx->sep_used->pdev->dev,
494 "%02x\n", *(cptr + ct1));
497 dev_dbg(&ta_ctx->sep_used->pdev->dev,
498 "sep - walk from kernel crypto iv for DES\n");
499 cptr = (unsigned char *)ta_ctx->walk.iv;
500 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
501 dev_dbg(&ta_ctx->sep_used->pdev->dev,
502 "%02x\n", *(cptr + ct1));
503 } else if ((ta_ctx->current_request == AES_CBC) &&
504 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
506 aes_internal = (struct sep_aes_internal_context *)
507 sctx->aes_private_ctx.cbuff;
509 dev_dbg(&ta_ctx->sep_used->pdev->dev,
510 "sep - vendor iv for AES\n");
511 cptr = (unsigned char *)aes_internal->aes_ctx_iv;
512 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
513 dev_dbg(&ta_ctx->sep_used->pdev->dev,
514 "%02x\n", *(cptr + ct1));
517 dev_dbg(&ta_ctx->sep_used->pdev->dev,
518 "sep - walk from kernel crypto iv for AES\n");
519 cptr = (unsigned char *)ta_ctx->walk.iv;
520 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
521 dev_dbg(&ta_ctx->sep_used->pdev->dev,
522 "%02x\n", *(cptr + ct1));
527 * RFC2451: Weak key check
528 * Returns: 1 (weak), 0 (not weak)
530 static int sep_weak_key(const u8 *key, unsigned int keylen)
532 static const u8 parity[] = {
533 8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
534 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 3,
536 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
537 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
539 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
540 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
542 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
543 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
545 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
546 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
548 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
549 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
551 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
552 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
554 4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
555 8, 5, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 6, 8,
556 };
558 u32 n, w;
561 n = parity[key[0]]; n <<= 4;
562 n |= parity[key[1]]; n <<= 4;
563 n |= parity[key[2]]; n <<= 4;
564 n |= parity[key[3]]; n <<= 4;
565 n |= parity[key[4]]; n <<= 4;
566 n |= parity[key[5]]; n <<= 4;
567 n |= parity[key[6]]; n <<= 4;
568 n |= parity[key[7]];
570 w = 0x88888888L;
571 /* 1 in 10^10 keys passes this test */
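/*
 * Worked example (illustrative): for the all-0x01 weak key,
 * parity[0x01] == 1 for every byte, so n == 0x11111111.  Then
 * (n - (w >> 3)) == 0x11111111 - 0x11111111 == 0, the test below
 * passes, and 0x11111111 < 0x14141515 walks to the
 * "01 01 01 01 01 01 01 01" leaf.
 */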
572 if (!((n - (w >> 3)) & w)) {
573 if (n < 0x41415151) {
574 if (n < 0x31312121) {
575 if (n < 0x14141515) {
576 /* 01 01 01 01 01 01 01 01 */
579 /* 01 1F 01 1F 01 0E 01 0E */
583 /* 01 E0 01 E0 01 F1 01 F1 */
586 /* 01 FE 01 FE 01 FE 01 FE */
591 if (n < 0x34342525) {
592 /* 1F 01 1F 01 0E 01 0E 01 */
595 /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
599 /* 1F E0 1F E0 0E F1 0E F1 */
602 /* 1F FE 1F FE 0E FE 0E FE */
608 if (n < 0x61616161) {
609 if (n < 0x44445555) {
610 /* E0 01 E0 01 F1 01 F1 01 */
613 /* E0 1F E0 1F F1 0E F1 0E */
617 /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
620 /* E0 FE E0 FE F1 FE F1 FE */
625 if (n < 0x64646565) {
626 /* FE 01 FE 01 FE 01 FE 01 */
629 /* FE 1F FE 1F FE 0E FE 0E */
633 /* FE E0 FE E0 FE F1 FE F1 */
636 /* FE FE FE FE FE FE FE FE */
650 static u32 sep_sg_nents(struct scatterlist *sg)
664 * @ta_ctx: pointer to struct this_task_ctx
665 * @returns: offset to place for the next word in the message
666 * Set up pointer in message pool for new message
668 static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
672 ta_ctx->msg_len_words = 2;
673 ta_ctx->msgptr = ta_ctx->msg;
674 memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
675 ta_ctx->msgptr += sizeof(u32) * 2;
676 word_ptr = (u32 *)ta_ctx->msgptr;
677 *word_ptr = SEP_START_MSG_TOKEN;
678 return sizeof(u32) * 2;
683 * @ta_ctx: pointer to struct this_task_ctx
684 * @msg_offset: current message offset
685 * Returns: none
686 * End message; set length and CRC; and
687 * send interrupt to the SEP
689 static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
692 /* Msg size goes into msg after token */
693 ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
694 word_ptr = (u32 *)ta_ctx->msgptr;
695 word_ptr += 1;
696 *word_ptr = ta_ctx->msg_len_words;
698 /* CRC (currently 0) goes at end of msg */
699 word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
700 *word_ptr = 0;
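/*
 * Resulting message layout, relative to ta_ctx->msgptr (a sketch
 * derived from sep_start_msg()/sep_make_header()/sep_end_msg();
 * word = u32):
 *
 *	word 0: SEP_START_MSG_TOKEN
 *	word 1: total message length in words
 *	word 2: opcode (written by sep_make_header())
 *	word 3..n-1: parameters (sep_write_msg())
 *	word n: CRC, currently always 0
 */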
704 * sep_start_inbound_msg -
705 * @ta_ctx: pointer to struct this_task_ctx
706 * @msg_offset: offset to place for the next word in the message
707 * @returns: 0 for success; error value for failure
708 * Set up pointer in message pool for inbound message
710 static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
716 *msg_offset = sizeof(u32) * 2;
717 word_ptr = (u32 *)ta_ctx->msgptr;
718 token = *word_ptr;
719 ta_ctx->msg_len_words = *(word_ptr + 1);
721 if (token != SEP_START_MSG_TOKEN) {
722 error = SEP_INVALID_START;
733 * @ta_ctx: pointer to struct this_task_ctx
734 * @in_addr: pointer to start of parameter
735 * @size: size of parameter to copy (in bytes)
736 * @max_size: size to move up offset; SEP message is in word sizes
737 * @msg_offset: pointer to current offset (is updated)
738 * @byte_array: flag to indicate whether endianness must be changed
739 * Copies data into the message area from caller
741 static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
742 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
747 void_ptr = ta_ctx->msgptr + *msg_offset;
748 word_ptr = (u32 *)void_ptr;
749 memcpy(void_ptr, in_addr, size);
750 *msg_offset += max_size;
752 /* Do we need to manipulate endian? */
756 for (i = 0; i < ((size + 3) / 4); i += 1)
757 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
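/*
 * Illustrative example: writing a 5-byte byte_array with
 * max_size = sizeof(u32) * 32 copies the 5 bytes, byte-swaps
 * (5 + 3) / 4 = 2 words in place via CHG_ENDIAN, and advances
 * *msg_offset by the full 128-byte slot regardless of the data size.
 */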
763 * @ta_ctx: pointer to struct this_task_ctx
764 * @msg_offset: pointer to current offset (is updated)
765 * @op_code: op code to put into message
766 * Puts op code into message and updates offset
768 static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
773 *msg_offset = sep_start_msg(ta_ctx);
774 word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
775 *word_ptr = op_code;
776 *msg_offset += sizeof(u32);
783 * @ta_ctx: pointer to struct this_task_ctx
784 * @in_addr: pointer to start of parameter
785 * @size: size of parameter to copy (in bytes)
786 * @max_size: size to move up offset; SEP message is in word sizes
787 * @msg_offset: pointer to current offset (is updated)
788 * @byte_array: flag to indicate whether endianness must be changed
789 * Copies data out of the message area to caller
791 static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
792 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
797 void_ptr = ta_ctx->msgptr + *msg_offset;
798 word_ptr = (u32 *)void_ptr;
800 /* Do we need to manipulate endian? */
804 for (i = 0; i < ((size + 3) / 4); i += 1)
805 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
808 memcpy(in_addr, void_ptr, size);
809 *msg_offset += max_size;
814 * @ta_ctx: pointer to struct this_task_ctx
815 * @op_code: expected op_code
816 * @msg_offset: pointer to current offset (is updated)
817 * @returns: 0 for success; error for failure
819 static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
825 struct sep_device *sep = ta_ctx->sep_used;
827 dev_dbg(&sep->pdev->dev, "dumping return message\n");
828 error = sep_start_inbound_msg(ta_ctx, msg_offset);
830 dev_warn(&sep->pdev->dev,
831 "sep_start_inbound_msg error\n");
835 sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
838 if (in_ary[0] != op_code) {
839 dev_warn(&sep->pdev->dev,
840 "sep got back wrong opcode\n");
841 dev_warn(&sep->pdev->dev,
842 "got back %x; expected %x\n",
844 return SEP_WRONG_OPCODE;
847 if (in_ary[1] != SEP_OK) {
848 dev_warn(&sep->pdev->dev,
849 "sep execution error\n");
850 dev_warn(&sep->pdev->dev,
851 "got back %x; expected %x\n",
861 * @ta_ctx: pointer to struct this_task_ctx
862 * @msg_offset: point to current place in SEP msg; is updated
863 * @dst: pointer to place to put the context
864 * @len: size of the context structure (differs for crypto/hash)
865 * This function reads the context from the msg area
866 * There is a special way the vendor needs to have the maximum
867 * length calculated so that the msg_offset is updated properly;
868 * it skips over some words in the msg area depending on the size
871 static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
872 void *dst, u32 len)
873 {
874 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
876 sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
880 * sep_write_context -
881 * @ta_ctx: pointer to struct this_task_ctx
882 * @msg_offset: point to current place in SEP msg; is updated
883 * @src: pointer to the current context
884 * @len: size of the context structure (differs for crypto/hash)
885 * This function writes the context to the msg area
886 * There is a special way the vendor needs to have the maximum
887 * length calculated so that the msg_offset is updated properly;
888 * it skips over some words in the msg area depending on the size
891 static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
892 void *src, u32 len)
893 {
894 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
896 sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
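/*
 * Illustrative example: a 22-byte context gives
 * max_length = ((22 + 3) / 4) * 4 = 24, i.e. len rounded up to a
 * whole number of 32-bit words, so the message offset always stays
 * word aligned even for odd-sized vendor contexts.
 */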
901 * @ta_ctx: pointer to struct this_task_ctx
902 * Clear out crypto related values in sep device structure
903 * to enable device to be used by anyone; either kernel
904 * crypto or userspace app via middleware
906 static void sep_clear_out(struct this_task_ctx *ta_ctx)
908 if (ta_ctx->src_sg_hold) {
909 sep_free_sg_buf(ta_ctx->src_sg_hold);
910 ta_ctx->src_sg_hold = NULL;
913 if (ta_ctx->dst_sg_hold) {
914 sep_free_sg_buf(ta_ctx->dst_sg_hold);
915 ta_ctx->dst_sg_hold = NULL;
918 ta_ctx->src_sg = NULL;
919 ta_ctx->dst_sg = NULL;
921 sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
923 if (ta_ctx->i_own_sep) {
925 * The following unlocks the sep and makes it available
926 * to any other application
927 * First, null out crypto entries in sep before releasing it
929 ta_ctx->sep_used->current_hash_req = NULL;
930 ta_ctx->sep_used->current_cypher_req = NULL;
931 ta_ctx->sep_used->current_request = 0;
932 ta_ctx->sep_used->current_hash_stage = 0;
933 ta_ctx->sep_used->ta_ctx = NULL;
934 ta_ctx->sep_used->in_kernel = 0;
936 ta_ctx->call_status.status = 0;
938 /* Remove anything confidential */
939 memset(ta_ctx->sep_used->shared_addr, 0,
940 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
942 sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
944 #ifdef SEP_ENABLE_RUNTIME_PM
945 ta_ctx->sep_used->in_use = 0;
946 pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
947 pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
950 clear_bit(SEP_WORKING_LOCK_BIT,
951 &ta_ctx->sep_used->in_use_flags);
952 ta_ctx->sep_used->pid_doing_transaction = 0;
954 dev_dbg(&ta_ctx->sep_used->pdev->dev,
955 "[PID%d] waking up next transaction\n",
958 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
959 &ta_ctx->sep_used->in_use_flags);
960 wake_up(&ta_ctx->sep_used->event_transactions);
962 ta_ctx->i_own_sep = 0;
967 * Release crypto infrastructure from EINPROGRESS and
968 * clear sep_dev so that SEP is available to anyone
970 static void sep_crypto_release(struct sep_system_ctx *sctx,
971 struct this_task_ctx *ta_ctx, u32 error)
973 struct ahash_request *hash_req = ta_ctx->current_hash_req;
974 struct ablkcipher_request *cypher_req =
975 ta_ctx->current_cypher_req;
976 struct sep_device *sep = ta_ctx->sep_used;
978 sep_clear_out(ta_ctx);
981 * This may not yet exist depending when we
982 * chose to bail out. If it does exist, set
985 if (ta_ctx->are_we_done_yet != NULL)
986 *ta_ctx->are_we_done_yet = 1;
988 if (cypher_req != NULL) {
989 if ((sctx->key_sent == 1) ||
990 ((error != 0) && (error != -EINPROGRESS))) {
991 if (cypher_req->base.complete == NULL) {
992 dev_dbg(&sep->pdev->dev,
993 "release is null for cypher!");
995 cypher_req->base.complete(
996 &cypher_req->base, error);
1001 if (hash_req != NULL) {
1002 if (hash_req->base.complete == NULL) {
1003 dev_dbg(&sep->pdev->dev,
1004 "release is null for hash!");
1006 hash_req->base.complete(
1007 &hash_req->base, error);
1013 * This is where we grab the sep itself and tell it to do something.
1014 * It will sleep if the sep is currently busy
1015 * and it will return 0 if the sep is now ours; an error value if there was a problem
1018 static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
1020 struct sep_device *sep = ta_ctx->sep_used;
1022 struct sep_msgarea_hdr *my_msg_header;
1024 my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
1026 /* add to status queue */
1027 ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
1028 ta_ctx->nbytes, current->pid,
1029 current->comm, sizeof(current->comm));
1031 if (!ta_ctx->queue_elem) {
1032 dev_dbg(&sep->pdev->dev,
1033 "[PID%d] updating queue status error\n", current->pid);
1037 /* get the device; this can sleep */
1038 result = sep_wait_transaction(sep);
1042 if (sep_dev->power_save_setup == 1)
1043 pm_runtime_get_sync(&sep_dev->pdev->dev);
1045 /* Copy in the message */
1046 memcpy(sep->shared_addr, ta_ctx->msg,
1047 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1049 /* Copy in the dcb information if there is any */
1050 if (ta_ctx->dcb_region) {
1051 result = sep_activate_dcb_dmatables_context(sep,
1052 &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
1058 /* Mark the device so we know how to finish the job in the tasklet */
1059 if (ta_ctx->current_hash_req)
1060 sep->current_hash_req = ta_ctx->current_hash_req;
1061 else
1062 sep->current_cypher_req = ta_ctx->current_cypher_req;
1064 sep->current_request = ta_ctx->current_request;
1065 sep->current_hash_stage = ta_ctx->current_hash_stage;
1066 sep->ta_ctx = ta_ctx;
1068 ta_ctx->i_own_sep = 1;
1070 /* need to set bit first to avoid race condition with interrupt */
1071 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
1073 result = sep_send_command_handler(sep);
1075 dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
1078 if (!result)
1079 dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
1080 current->pid);
1081 else {
1082 dev_dbg(&sep->pdev->dev, "[PID%d]: can't send command\n",
1083 current->pid);
1084 clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1085 &ta_ctx->call_status.status);
1086 }
1088 return result;
1092 * This function sets things up for a crypto data block process
1093 * This does all preparation, but does not try to grab the
1095 * @req: pointer to struct ablkcipher_request
1096 * returns: 0 if all went well, non zero if error
1098 static int sep_crypto_block_data(struct ablkcipher_request *req)
1107 static char small_buf[100];
1108 ssize_t copy_result;
1111 struct scatterlist *new_sg;
1112 struct this_task_ctx *ta_ctx;
1113 struct crypto_ablkcipher *tfm;
1114 struct sep_system_ctx *sctx;
1116 struct sep_des_internal_context *des_internal;
1117 struct sep_aes_internal_context *aes_internal;
1119 ta_ctx = ablkcipher_request_ctx(req);
1120 tfm = crypto_ablkcipher_reqtfm(req);
1121 sctx = crypto_ablkcipher_ctx(tfm);
1123 /* start the walk on scatterlists */
1124 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1125 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
1128 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1130 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1135 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1136 "crypto block: src is %lx dst is %lx\n",
1137 (unsigned long)req->src, (unsigned long)req->dst);
1139 /* Make sure all pages are even block */
1140 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
1141 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
1143 if (int_error < 0) {
1144 dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
1146 } else if (int_error == 1) {
1147 ta_ctx->src_sg = new_sg;
1148 ta_ctx->src_sg_hold = new_sg;
1150 ta_ctx->src_sg = req->src;
1151 ta_ctx->src_sg_hold = NULL;
1154 int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
1155 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
1157 if (int_error < 0) {
1158 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1161 } else if (int_error == 1) {
1162 ta_ctx->dst_sg = new_sg;
1163 ta_ctx->dst_sg_hold = new_sg;
1165 ta_ctx->dst_sg = req->dst;
1166 ta_ctx->dst_sg_hold = NULL;
1169 /* set nbytes for queue status */
1170 ta_ctx->nbytes = req->nbytes;
1172 /* Key already done; this is for data */
1173 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
1175 /* check for valid data and proper spacing */
1176 src_ptr = sg_virt(ta_ctx->src_sg);
1177 dst_ptr = sg_virt(ta_ctx->dst_sg);
1179 if (!src_ptr || !dst_ptr ||
1180 (ta_ctx->current_cypher_req->nbytes %
1181 crypto_ablkcipher_blocksize(tfm))) {
1183 dev_warn(&ta_ctx->sep_used->pdev->dev,
1184 "cipher block size odd\n");
1185 dev_warn(&ta_ctx->sep_used->pdev->dev,
1186 "cipher block size is %x\n",
1187 crypto_ablkcipher_blocksize(tfm));
1188 dev_warn(&ta_ctx->sep_used->pdev->dev,
1189 "cipher data size is %x\n",
1190 ta_ctx->current_cypher_req->nbytes);
1194 if (partial_overlap(src_ptr, dst_ptr,
1195 ta_ctx->current_cypher_req->nbytes)) {
1196 dev_warn(&ta_ctx->sep_used->pdev->dev,
1197 "block partial overlap\n");
1201 /* Put together the message */
1202 sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
1204 /* If des, and size is 1 block, put directly in msg */
1205 if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
1206 (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
1208 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1209 "writing out one block des\n");
1211 copy_result = sg_copy_to_buffer(
1212 ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
1213 small_buf, crypto_ablkcipher_blocksize(tfm));
1215 if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
1216 dev_warn(&ta_ctx->sep_used->pdev->dev,
1217 "des block copy failed\n");
1221 /* Put data into message */
1222 sep_write_msg(ta_ctx, small_buf,
1223 crypto_ablkcipher_blocksize(tfm),
1224 crypto_ablkcipher_blocksize(tfm) * 2,
1227 /* Put size into message */
1228 sep_write_msg(ta_ctx, &req->nbytes,
1229 sizeof(u32), sizeof(u32), &msg_offset, 0);
1231 /* Otherwise, fill out dma tables */
1232 ta_ctx->dcb_input_data.app_in_address = src_ptr;
1233 ta_ctx->dcb_input_data.data_in_size = req->nbytes;
1234 ta_ctx->dcb_input_data.app_out_address = dst_ptr;
1235 ta_ctx->dcb_input_data.block_size =
1236 crypto_ablkcipher_blocksize(tfm);
1237 ta_ctx->dcb_input_data.tail_block_size = 0;
1238 ta_ctx->dcb_input_data.is_applet = 0;
1239 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
1240 ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
1242 result = sep_create_dcb_dmatables_context_kernel(
1244 &ta_ctx->dcb_region,
1245 &ta_ctx->dmatables_region,
1247 &ta_ctx->dcb_input_data,
1250 dev_warn(&ta_ctx->sep_used->pdev->dev,
1251 "crypto dma table create failed\n");
1255 /* Portion of msg is nulled (no data) */
1261 sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
1262 sizeof(u32) * 5, &msg_offset, 0);
1266 * Before we write the message, we need to overwrite the
1267 * vendor's IV with the one from our own ablkcipher walk
1268 * iv because this is needed for dm-crypt
1270 sep_dump_ivs(req, "sending data block to sep\n");
1271 if ((ta_ctx->current_request == DES_CBC) &&
1272 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1274 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1275 "overwrite vendor iv on DES\n");
1276 des_internal = (struct sep_des_internal_context *)
1277 sctx->des_private_ctx.ctx_buf;
1278 memcpy((void *)des_internal->iv_context,
1279 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1280 } else if ((ta_ctx->current_request == AES_CBC) &&
1281 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1283 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1284 "overwrite vendor iv on AES\n");
1285 aes_internal = (struct sep_aes_internal_context *)
1286 sctx->aes_private_ctx.cbuff;
1287 memcpy((void *)aes_internal->aes_ctx_iv,
1288 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1291 /* Write context into message */
1292 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1293 sep_write_context(ta_ctx, &msg_offset,
1294 &sctx->des_private_ctx,
1295 sizeof(struct sep_des_private_context));
1297 sep_write_context(ta_ctx, &msg_offset,
1298 &sctx->aes_private_ctx,
1299 sizeof(struct sep_aes_private_context));
1302 /* conclude message */
1303 sep_end_msg(ta_ctx, msg_offset);
1305 /* Parent (caller) is now ready to tell the sep to go ahead */
1311 * This function sets things up for a crypto key submit process
1312 * This does all preparation, but does not try to grab the
1314 * @req: pointer to struct ablkcipher_request
1315 * returns: 0 if all went well, non zero if error
1317 static int sep_crypto_send_key(struct ablkcipher_request *req)
1325 struct this_task_ctx *ta_ctx;
1326 struct crypto_ablkcipher *tfm;
1327 struct sep_system_ctx *sctx;
1329 ta_ctx = ablkcipher_request_ctx(req);
1330 tfm = crypto_ablkcipher_reqtfm(req);
1331 sctx = crypto_ablkcipher_ctx(tfm);
1333 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
1335 /* start the walk on scatterlists */
1336 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1337 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1338 "sep crypto block data size of %x\n", req->nbytes);
1340 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1342 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1348 if ((ta_ctx->current_request == DES_CBC) &&
1349 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1350 if (!ta_ctx->walk.iv) {
1351 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1355 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1358 if ((ta_ctx->current_request == AES_CBC) &&
1359 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1360 if (!ta_ctx->walk.iv) {
1361 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1365 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1368 /* put together message to SEP */
1369 /* Start with op code */
1370 sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
1372 /* now deal with IV */
1373 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1374 if (ta_ctx->des_opmode == SEP_DES_CBC) {
1375 sep_write_msg(ta_ctx, ta_ctx->iv,
1376 SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
1380 msg_offset += 4 * sizeof(u32);
1383 max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
1384 sizeof(u32)) * sizeof(u32);
1385 if (ta_ctx->aes_opmode == SEP_AES_CBC) {
1386 sep_write_msg(ta_ctx, ta_ctx->iv,
1387 SEP_AES_IV_SIZE_BYTES, max_length,
1391 msg_offset += max_length;
1396 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1397 sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
1398 sizeof(u32) * 8, sizeof(u32) * 8,
1401 msg[0] = (u32)sctx->des_nbr_keys;
1402 msg[1] = (u32)ta_ctx->des_encmode;
1403 msg[2] = (u32)ta_ctx->des_opmode;
1405 sep_write_msg(ta_ctx, (void *)msg,
1406 sizeof(u32) * 3, sizeof(u32) * 3,
1409 sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
1411 SEP_AES_MAX_KEY_SIZE_BYTES,
1414 msg[0] = (u32)sctx->aes_key_size;
1415 msg[1] = (u32)ta_ctx->aes_encmode;
1416 msg[2] = (u32)ta_ctx->aes_opmode;
1417 msg[3] = (u32)0; /* Secret key is not used */
1418 sep_write_msg(ta_ctx, (void *)msg,
1419 sizeof(u32) * 4, sizeof(u32) * 4,
1423 /* conclude message */
1424 sep_end_msg(ta_ctx, msg_offset);
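/*
 * Resulting key-init message (a sketch derived from the code above;
 * offsets assume the usual 4-word IV slot):
 *
 *	[opcode][IV: 4 words][key material][mode words]
 *
 * where the mode words are [nbr_keys][encmode][opmode] for DES
 * (key material = 8 words starting at key1) and
 * [key_size][encmode][opmode][0, secret key unused] for AES
 * (key material = SEP_AES_MAX_KEY_SIZE_BYTES).
 */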
1426 /* Parent (caller) is now ready to tell the sep to go ahead */
1431 /* This needs to be run as a work queue as it can be put asleep */
1432 static void sep_crypto_block(void *data)
1434 unsigned long end_time;
1438 struct ablkcipher_request *req;
1439 struct this_task_ctx *ta_ctx;
1440 struct crypto_ablkcipher *tfm;
1441 struct sep_system_ctx *sctx;
1442 int are_we_done_yet;
1444 req = (struct ablkcipher_request *)data;
1445 ta_ctx = ablkcipher_request_ctx(req);
1446 tfm = crypto_ablkcipher_reqtfm(req);
1447 sctx = crypto_ablkcipher_ctx(tfm);
1449 ta_ctx->are_we_done_yet = &are_we_done_yet;
1451 pr_debug("sep_crypto_block\n");
1452 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
1454 pr_debug("key_sent is %d\n", sctx->key_sent);
1456 /* do we need to send the key */
1457 if (sctx->key_sent == 0) {
1458 are_we_done_yet = 0;
1459 result = sep_crypto_send_key(req); /* prep to send key */
1461 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1462 "could not prep key %x\n", result);
1463 sep_crypto_release(sctx, ta_ctx, result);
1467 result = sep_crypto_take_sep(ta_ctx);
1469 dev_warn(&ta_ctx->sep_used->pdev->dev,
1470 "sep_crypto_take_sep for key send failed\n");
1471 sep_crypto_release(sctx, ta_ctx, result);
1475 /* now we sit and wait up to a fixed time for completion */
1476 end_time = jiffies + (WAIT_TIME * HZ);
1477 while ((time_before(jiffies, end_time)) &&
1478 (are_we_done_yet == 0))
1479 schedule();
1481 /* Done waiting; still not done yet? */
1482 if (are_we_done_yet == 0) {
1483 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1484 "Send key job never got done\n");
1485 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1489 /* Set the key sent variable so this can be skipped later */
1493 /* Key sent (or maybe not if we did not have to), now send block */
1494 are_we_done_yet = 0;
1496 result = sep_crypto_block_data(req);
1499 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1500 "could prep not send block %x\n", result);
1501 sep_crypto_release(sctx, ta_ctx, result);
1505 result = sep_crypto_take_sep(ta_ctx);
1507 dev_warn(&ta_ctx->sep_used->pdev->dev,
1508 "sep_crypto_take_sep for block send failed\n");
1509 sep_crypto_release(sctx, ta_ctx, result);
1513 /* now we sit and wait up to a fixed time for completion */
1514 end_time = jiffies + (WAIT_TIME * HZ);
1515 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
1516 schedule();
1518 /* Done waiting; still not done yet? */
1519 if (are_we_done_yet == 0) {
1520 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1521 "Send block job never got done\n");
1522 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1526 /* That's it; entire thing done, get out of queue */
1528 pr_debug("crypto_block leaving\n");
1529 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
1533 * Post operation (after interrupt) for crypto block
1535 static u32 crypto_post_op(struct sep_device *sep)
1541 ssize_t copy_result;
1542 static char small_buf[100];
1544 struct ablkcipher_request *req;
1545 struct this_task_ctx *ta_ctx;
1546 struct sep_system_ctx *sctx;
1547 struct crypto_ablkcipher *tfm;
1549 struct sep_des_internal_context *des_internal;
1550 struct sep_aes_internal_context *aes_internal;
1552 if (!sep->current_cypher_req)
1555 /* hold req since we need to submit work after clearing sep */
1556 req = sep->current_cypher_req;
1558 ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
1559 tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
1560 sctx = crypto_ablkcipher_ctx(tfm);
1562 pr_debug("crypto_post op\n");
1563 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1564 sctx->key_sent, tfm, sctx, ta_ctx);
1566 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
1567 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
1569 /* first bring msg from shared area to local area */
1570 memcpy(ta_ctx->msg, sep->shared_addr,
1571 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1573 /* Is this the result of performing init (sending key to SEP)? */
1574 if (sctx->key_sent == 0) {
1576 /* Did SEP do it okay */
1577 u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
1580 dev_warn(&ta_ctx->sep_used->pdev->dev,
1581 "aes init error %x\n", u32_error);
1582 sep_crypto_release(sctx, ta_ctx, u32_error);
1587 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1588 sep_read_context(ta_ctx, &msg_offset,
1589 &sctx->des_private_ctx,
1590 sizeof(struct sep_des_private_context));
1592 sep_read_context(ta_ctx, &msg_offset,
1593 &sctx->aes_private_ctx,
1594 sizeof(struct sep_aes_private_context));
1597 sep_dump_ivs(req, "after sending key to sep\n");
1599 /* key sent went okay; release sep, and set are_we_done_yet */
1601 sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
1606 * This is the result of a block request
1608 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1609 "crypto_post_op block response\n");
1611 u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
1615 dev_warn(&ta_ctx->sep_used->pdev->dev,
1616 "sep block error %x\n", u32_error);
1617 sep_crypto_release(sctx, ta_ctx, u32_error);
1621 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1623 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1624 "post op for DES\n");
1626 /* special case for 1 block des */
1627 if (sep->current_cypher_req->nbytes ==
1628 crypto_ablkcipher_blocksize(tfm)) {
1630 sep_read_msg(ta_ctx, small_buf,
1631 crypto_ablkcipher_blocksize(tfm),
1632 crypto_ablkcipher_blocksize(tfm) * 2,
1635 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1636 "reading in block des\n");
1638 copy_result = sg_copy_from_buffer(
1640 sep_sg_nents(ta_ctx->dst_sg),
1642 crypto_ablkcipher_blocksize(tfm));
1645 crypto_ablkcipher_blocksize(tfm)) {
1647 dev_warn(&ta_ctx->sep_used->pdev->dev,
1648 "des block copy failed\n");
1649 sep_crypto_release(sctx, ta_ctx,
1656 sep_read_context(ta_ctx, &msg_offset,
1657 &sctx->des_private_ctx,
1658 sizeof(struct sep_des_private_context));
1661 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1662 "post op for AES\n");
1664 /* Skip the MAC Output */
1665 msg_offset += (sizeof(u32) * 4);
1668 sep_read_context(ta_ctx, &msg_offset,
1669 &sctx->aes_private_ctx,
1670 sizeof(struct sep_aes_private_context));
1673 /* Copy to correct sg if this block had oddball pages */
1674 if (ta_ctx->dst_sg_hold)
1675 sep_copy_sg(ta_ctx->sep_used,
1677 ta_ctx->current_cypher_req->dst,
1678 ta_ctx->current_cypher_req->nbytes);
1681 * Copy the iv's back to the walk.iv
1682 * This is required for dm_crypt
1684 sep_dump_ivs(req, "got data block from sep\n");
1685 if ((ta_ctx->current_request == DES_CBC) &&
1686 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1688 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1689 "returning result iv to walk on DES\n");
1690 des_internal = (struct sep_des_internal_context *)
1691 sctx->des_private_ctx.ctx_buf;
1692 memcpy(ta_ctx->walk.iv,
1693 (void *)des_internal->iv_context,
1694 crypto_ablkcipher_ivsize(tfm));
1695 } else if ((ta_ctx->current_request == AES_CBC) &&
1696 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1698 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1699 "returning result iv to walk on AES\n");
1700 aes_internal = (struct sep_aes_internal_context *)
1701 sctx->aes_private_ctx.cbuff;
1702 memcpy(ta_ctx->walk.iv,
1703 (void *)aes_internal->aes_ctx_iv,
1704 crypto_ablkcipher_ivsize(tfm));
1707 /* finished, release everything */
1708 sep_crypto_release(sctx, ta_ctx, 0);
1710 pr_debug("crypto_post_op done\n");
1711 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1712 sctx->key_sent, tfm, sctx, ta_ctx);
1717 static u32 hash_init_post_op(struct sep_device *sep)
1721 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1722 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1723 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1724 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1725 "hash init post op\n");
1727 /* first bring msg from shared area to local area */
1728 memcpy(ta_ctx->msg, sep->shared_addr,
1729 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1731 u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
1735 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1737 sep_crypto_release(sctx, ta_ctx, u32_error);
1742 sep_read_context(ta_ctx, &msg_offset,
1743 &sctx->hash_private_ctx,
1744 sizeof(struct sep_hash_private_context));
1746 /* Signal to crypto infrastructure and clear out */
1747 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
1748 sep_crypto_release(sctx, ta_ctx, 0);
1752 static u32 hash_update_post_op(struct sep_device *sep)
1756 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1757 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1758 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1759 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1760 "hash update post op\n");
1762 /* first bring msg from shared area to local area */
1763 memcpy(ta_ctx->msg, sep->shared_addr,
1764 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1766 u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
1770 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
1772 sep_crypto_release(sctx, ta_ctx, u32_error);
1777 sep_read_context(ta_ctx, &msg_offset,
1778 &sctx->hash_private_ctx,
1779 sizeof(struct sep_hash_private_context));
1782 * Following is only for finup; if we just completed the
1783 * data portion of finup, we now need to kick off the
1784 * finish portion of finup.
1787 if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1789 /* first reset stage to HASH_FINUP_FINISH */
1790 ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1792 /* now enqueue the finish operation */
1793 spin_lock_irq(&queue_lock);
1794 u32_error = crypto_enqueue_request(&sep_queue,
1795 &ta_ctx->sep_used->current_hash_req->base);
1796 spin_unlock_irq(&queue_lock);
1798 if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1799 dev_warn(&ta_ctx->sep_used->pdev->dev,
1800 "spe cypher post op cant queue\n");
1801 sep_crypto_release(sctx, ta_ctx, u32_error);
1805 /* schedule the data send */
1806 u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1807 sep_dequeuer, (void *)&sep_queue);
1810 dev_warn(&ta_ctx->sep_used->pdev->dev,
1811 "cant submit work sep_crypto_block\n");
1812 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1817 /* Signal to crypto infrastructure and clear out */
1818 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1819 sep_crypto_release(sctx, ta_ctx, 0);
1823 static u32 hash_final_post_op(struct sep_device *sep)
1828 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1829 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1830 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1831 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1832 "hash final post op\n");
1834 /* first bring msg from shared area to local area */
1835 memcpy(ta_ctx->msg, sep->shared_addr,
1836 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1838 u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1842 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1844 sep_crypto_release(sctx, ta_ctx, u32_error);
1848 /* Grab the result */
1849 if (ta_ctx->current_hash_req->result == NULL) {
1850 /* Oops, null buffer; error out here */
1851 dev_warn(&ta_ctx->sep_used->pdev->dev,
1852 "hash finish null buffer\n");
1853 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1857 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1858 sizeof(u32)) * sizeof(u32);
1860 sep_read_msg(ta_ctx,
1861 ta_ctx->current_hash_req->result,
1862 crypto_ahash_digestsize(tfm), max_length,
1865 /* Signal to crypto infrastructure and clear out */
1866 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1867 sep_crypto_release(sctx, ta_ctx, 0);
1871 static u32 hash_digest_post_op(struct sep_device *sep)
1876 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1877 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1878 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1879 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1880 "hash digest post op\n");
1882 /* first bring msg from shared area to local area */
1883 memcpy(ta_ctx->msg, sep->shared_addr,
1884 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1886 u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1890 dev_warn(&ta_ctx->sep_used->pdev->dev,
1891 "hash digest finish error %x\n", u32_error);
1893 sep_crypto_release(sctx, ta_ctx, u32_error);
1897 /* Grab the result */
1898 if (ta_ctx->current_hash_req->result == NULL) {
1899 /* Oops, null buffer; error out here */
1900 dev_warn(&ta_ctx->sep_used->pdev->dev,
1901 "hash digest finish null buffer\n");
1902 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1906 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1907 sizeof(u32)) * sizeof(u32);
1909 sep_read_msg(ta_ctx,
1910 ta_ctx->current_hash_req->result,
1911 crypto_ahash_digestsize(tfm), max_length,
1914 /* Signal to crypto infrastructure and clear out */
1915 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1916 "hash digest finish post op done\n");
1918 sep_crypto_release(sctx, ta_ctx, 0);
1923 * The sep_finish function is the function that is scheduled (via tasklet)
1924 * by the interrupt service routine when the SEP sends an interrupt
1925 * This is only called by the interrupt handler as a tasklet.
1927 static void sep_finish(unsigned long data)
1929 struct sep_device *sep_dev;
1935 pr_debug("sep_finish called with null data\n");
1939 sep_dev = (struct sep_device *)data;
1940 if (sep_dev == NULL) {
1941 pr_debug("sep_finish; sep_dev is NULL\n");
1945 if (sep_dev->in_kernel == (u32)0) {
1946 dev_warn(&sep_dev->pdev->dev,
1947 "sep_finish; not in kernel operation\n");
1951 /* Did we really do a sep command prior to this? */
1952 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1953 &sep_dev->ta_ctx->call_status.status)) {
1955 dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
1960 if (sep_dev->send_ct != sep_dev->reply_ct) {
1961 dev_warn(&sep_dev->pdev->dev,
1962 "[PID%d] poll; no message came back\n",
1967 /* Check for error (In case time ran out) */
1968 if ((res != 0x0) && (res != 0x8)) {
1969 dev_warn(&sep_dev->pdev->dev,
1970 "[PID%d] poll; poll error GPR3 is %x\n",
1975 /* What kind of interrupt from sep was this? */
1976 res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
1978 dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
1981 /* Print request? */
1982 if ((res >> 30) & 0x1) {
1983 dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
1985 dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
1987 (char *)(sep_dev->shared_addr +
1988 SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
1992 /* Request for daemon (not currently in POR)? */
1994 dev_dbg(&sep_dev->pdev->dev,
1995 "[PID%d] sep request; ignoring\n",
2000 /* If we got here, then we have a reply to a sep command */
2002 dev_dbg(&sep_dev->pdev->dev,
2003 "[PID%d] sep reply to command; processing request: %x\n",
2004 current->pid, sep_dev->current_request);
2006 switch (sep_dev->current_request) {
2011 res = crypto_post_op(sep_dev);
2017 switch (sep_dev->current_hash_stage) {
2019 res = hash_init_post_op(sep_dev);
2022 case HASH_FINUP_DATA:
2023 res = hash_update_post_op(sep_dev);
2025 case HASH_FINUP_FINISH:
2027 res = hash_final_post_op(sep_dev);
2030 res = hash_digest_post_op(sep_dev);
2033 pr_debug("sep - invalid stage for hash finish\n");
2037 pr_debug("sep - invalid request for finish\n");
2041 pr_debug("sep - finish returned error %x\n", res);
2044 static int sep_hash_cra_init(struct crypto_tfm *tfm)
2046 const char *alg_name = crypto_tfm_alg_name(tfm);
2048 pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2050 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2051 sizeof(struct this_task_ctx));
2055 static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2057 pr_debug("sep_hash_cra_exit\n");
2060 static void sep_hash_init(void *data)
2064 struct ahash_request *req;
2065 struct crypto_ahash *tfm;
2066 struct this_task_ctx *ta_ctx;
2067 struct sep_system_ctx *sctx;
2068 unsigned long end_time;
2069 int are_we_done_yet;
2071 req = (struct ahash_request *)data;
2072 tfm = crypto_ahash_reqtfm(req);
2073 sctx = crypto_ahash_ctx(tfm);
2074 ta_ctx = ahash_request_ctx(req);
2075 ta_ctx->sep_used = sep_dev;
2077 ta_ctx->are_we_done_yet = &are_we_done_yet;
2079 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2081 ta_ctx->current_hash_stage = HASH_INIT;
2082 /* opcode and mode */
2083 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2084 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2085 sizeof(u32), sizeof(u32), &msg_offset, 0);
2086 sep_end_msg(ta_ctx, msg_offset);
2088 are_we_done_yet = 0;
2089 result = sep_crypto_take_sep(ta_ctx);
2091 dev_warn(&ta_ctx->sep_used->pdev->dev,
2092 "sep_hash_init take sep failed\n");
2093 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2096 /* now we sit and wait up to a fixed time for completion */
2097 end_time = jiffies + (WAIT_TIME * HZ);
2098 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2099 schedule();
2101 /* Done waiting; still not done yet? */
2102 if (are_we_done_yet == 0) {
2103 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2104 "hash init never got done\n");
2105 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2111 static void sep_hash_update(void *data)
2116 struct sep_hash_internal_context *int_ctx;
2120 int are_we_done_yet;
2123 static char small_buf[100];
2125 struct scatterlist *new_sg;
2126 ssize_t copy_result;
2127 struct ahash_request *req;
2128 struct crypto_ahash *tfm;
2129 struct this_task_ctx *ta_ctx;
2130 struct sep_system_ctx *sctx;
2131 unsigned long end_time;
2133 req = (struct ahash_request *)data;
2134 tfm = crypto_ahash_reqtfm(req);
2135 sctx = crypto_ahash_ctx(tfm);
2136 ta_ctx = ahash_request_ctx(req);
2137 ta_ctx->sep_used = sep_dev;
2139 ta_ctx->are_we_done_yet = &are_we_done_yet;
2141 /* length for queue status */
2142 ta_ctx->nbytes = req->nbytes;
2144 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2145 "sep_hash_update\n");
2146 ta_ctx->current_hash_stage = HASH_UPDATE;
2149 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2150 tail_len = req->nbytes % block_size;
2151 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2152 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2153 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2155 /* Compute header/tail sizes */
2156 int_ctx = (struct sep_hash_internal_context *)&sctx->
2157 hash_private_ctx.internal_context;
2158 head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2159 tail_len = (req->nbytes - head_len) % block_size;
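/*
 * Worked example (illustrative): with block_size = 64, 20 bytes
 * carried over from the previous update (prev_update_bytes) and
 * req->nbytes = 200:
 *	head_len = (64 - 20) % 64 = 44	(completes the held-over block)
 *	tail_len = (200 - 44) % 64 = 28	(partial block held back)
 * leaving 200 - (44 + 28) = 128 bytes (two full blocks) for the
 * DMA transfer below.
 */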
2161 /* Make sure all pages are an even block */
2162 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2164 block_size, &new_sg, 1);
2166 if (int_error < 0) {
2167 dev_warn(&ta_ctx->sep_used->pdev->dev,
2168 "oddball pages error in crash update\n");
2169 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2171 } else if (int_error == 1) {
2172 ta_ctx->src_sg = new_sg;
2173 ta_ctx->src_sg_hold = new_sg;
2175 ta_ctx->src_sg = req->src;
2176 ta_ctx->src_sg_hold = NULL;
2179 src_ptr = sg_virt(ta_ctx->src_sg);
2181 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2186 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2187 ta_ctx->dcb_input_data.data_in_size =
2188 req->nbytes - (head_len + tail_len);
2189 ta_ctx->dcb_input_data.app_out_address = NULL;
2190 ta_ctx->dcb_input_data.block_size = block_size;
2191 ta_ctx->dcb_input_data.tail_block_size = 0;
2192 ta_ctx->dcb_input_data.is_applet = 0;
2193 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2194 ta_ctx->dcb_input_data.dst_sg = NULL;
2196 int_error = sep_create_dcb_dmatables_context_kernel(
2198 &ta_ctx->dcb_region,
2199 &ta_ctx->dmatables_region,
2201 &ta_ctx->dcb_input_data,
2204 dev_warn(&ta_ctx->sep_used->pdev->dev,
2205 "hash update dma table create failed\n");
2206 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2210 /* Construct message to SEP */
2211 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2217 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2220 /* Handle remainders */
2223 sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2224 sizeof(u32), &msg_offset, 0);
2227 copy_result = sg_copy_to_buffer(
2229 sep_sg_nents(ta_ctx->src_sg),
2230 small_buf, head_len);
2232 if (copy_result != head_len) {
2233 dev_warn(&ta_ctx->sep_used->pdev->dev,
2234 "sg head copy failure in hash block\n");
2235 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2239 sep_write_msg(ta_ctx, small_buf, head_len,
2240 sizeof(u32) * 32, &msg_offset, 1);
2241 } else {
2242 msg_offset += sizeof(u32) * 32;
2243 }
2246 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2247 sizeof(u32), &msg_offset, 0);
2250 copy_result = sep_copy_offset_sg(
2253 req->nbytes - tail_len,
2254 small_buf, tail_len);
2256 if (copy_result != tail_len) {
2257 dev_warn(&ta_ctx->sep_used->pdev->dev,
2258 "sg tail copy failure in hash block\n");
2259 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2263 sep_write_msg(ta_ctx, small_buf, tail_len,
2264 sizeof(u32) * 32, &msg_offset, 1);
2265 } else {
2266 msg_offset += sizeof(u32) * 32;
2267 }
2270 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2271 sizeof(struct sep_hash_private_context));
2273 sep_end_msg(ta_ctx, msg_offset);
2274 are_we_done_yet = 0;
2275 int_error = sep_crypto_take_sep(ta_ctx);
2277 dev_warn(&ta_ctx->sep_used->pdev->dev,
2278 "sep_hash_update take sep failed\n");
2279 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2282 /* now we sit and wait up to a fixed time for completion */
2283 end_time = jiffies + (WAIT_TIME * HZ);
2284 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2285 schedule();
2287 /* Done waiting; still not done yet? */
2288 if (are_we_done_yet == 0) {
2289 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2290 "hash update never got done\n");
2291 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2297 static void sep_hash_final(void *data)
2300 struct ahash_request *req;
2301 struct crypto_ahash *tfm;
2302 struct this_task_ctx *ta_ctx;
2303 struct sep_system_ctx *sctx;
2305 unsigned long end_time;
2306 int are_we_done_yet;
2308 req = (struct ahash_request *)data;
2309 tfm = crypto_ahash_reqtfm(req);
2310 sctx = crypto_ahash_ctx(tfm);
2311 ta_ctx = ahash_request_ctx(req);
2312 ta_ctx->sep_used = sep_dev;
2314 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2315 "sep_hash_final\n");
2316 ta_ctx->current_hash_stage = HASH_FINISH;
2318 ta_ctx->are_we_done_yet = &are_we_done_yet;
2320 /* opcode and mode */
2321 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
2324 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2325 sizeof(struct sep_hash_private_context));
2327 sep_end_msg(ta_ctx, msg_offset);
2328 are_we_done_yet = 0;
2329 result = sep_crypto_take_sep(ta_ctx);
2331 dev_warn(&ta_ctx->sep_used->pdev->dev,
2332 "sep_hash_final take sep failed\n");
2333 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2336 /* now we sit and wait up to a fixed time for completion */
2337 end_time = jiffies + (WAIT_TIME * HZ);
2338 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2339 schedule();
2341 /* Done waiting; still not done yet? */
2342 if (are_we_done_yet == 0) {
2343 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2344 "hash final job never got done\n");
2345 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2351 static void sep_hash_digest(void *data)
2352 {
2353 int int_error;
2354 u32 msg_offset;
2355 u32 block_size;
2356 u32 msg[10];
2357 size_t copy_result;
2358 int result;
2359 int are_we_done_yet;
2360 u32 tail_len;
2361 static char small_buf[100];
2362 struct scatterlist *new_sg;
2363 void *src_ptr;
2365 struct ahash_request *req;
2366 struct crypto_ahash *tfm;
2367 struct this_task_ctx *ta_ctx;
2368 struct sep_system_ctx *sctx;
2369 unsigned long end_time;
2371 req = (struct ahash_request *)data;
2372 tfm = crypto_ahash_reqtfm(req);
2373 sctx = crypto_ahash_ctx(tfm);
2374 ta_ctx = ahash_request_ctx(req);
2375 ta_ctx->sep_used = sep_dev;
2377 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2378 "sep_hash_digest\n");
2379 ta_ctx->current_hash_stage = HASH_DIGEST;
2381 ta_ctx->are_we_done_yet = &are_we_done_yet;
2383 /* length for queue status */
2384 ta_ctx->nbytes = req->nbytes;
2386 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2387 tail_len = req->nbytes % block_size;
2388 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2389 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2390 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2392 /* Make sure all pages are an even block */
2393 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2395 block_size, &new_sg, 1);
2397 if (int_error < 0) {
2398 dev_warn(&ta_ctx->sep_used->pdev->dev,
2399 "oddball pages error in crash update\n");
2400 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2402 } else if (int_error == 1) {
2403 ta_ctx->src_sg = new_sg;
2404 ta_ctx->src_sg_hold = new_sg;
2405 } else {
2406 ta_ctx->src_sg = req->src;
2407 ta_ctx->src_sg_hold = NULL;
2408 }
2410 src_ptr = sg_virt(ta_ctx->src_sg);
2412 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2413 /* null data */
2414 sep_crypto_release(sctx, ta_ctx, -ENODATA);
2415 return;
2416 }
2417 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2418 ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2419 ta_ctx->dcb_input_data.app_out_address = NULL;
2420 ta_ctx->dcb_input_data.block_size = block_size;
2421 ta_ctx->dcb_input_data.tail_block_size = 0;
2422 ta_ctx->dcb_input_data.is_applet = 0;
2423 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2424 ta_ctx->dcb_input_data.dst_sg = NULL;
2426 int_error = sep_create_dcb_dmatables_context_kernel(
2427 ta_ctx->sep_used,
2428 &ta_ctx->dcb_region,
2429 &ta_ctx->dmatables_region,
2430 &ta_ctx->dma_ctx,
2431 &ta_ctx->dcb_input_data,
2432 1);
2433 if (int_error) {
2434 dev_warn(&ta_ctx->sep_used->pdev->dev,
2435 "hash digest dma table create failed\n");
2436 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2437 return;
2438 }
2440 /* Construct message to SEP */
2441 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2442 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2443 sizeof(u32), sizeof(u32), &msg_offset, 0);
2449 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2450 &msg_offset, 0);
2453 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2454 sizeof(u32), &msg_offset, 0);
2457 copy_result = sep_copy_offset_sg(
2458 ta_ctx->sep_used,
2459 ta_ctx->src_sg,
2460 req->nbytes - tail_len,
2461 small_buf, tail_len);
2463 if (copy_result != tail_len) {
2464 dev_warn(&ta_ctx->sep_used->pdev->dev,
2465 "sg tail copy failure in hash block\n");
2466 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2467 return;
2468 }
2470 sep_write_msg(ta_ctx, small_buf, tail_len,
2471 sizeof(u32) * 32, &msg_offset, 1);
2473 msg_offset += sizeof(u32) * 32;
2476 sep_end_msg(ta_ctx, msg_offset);
2478 are_we_done_yet = 0;
2479 result = sep_crypto_take_sep(ta_ctx);
2480 if (result) {
2481 dev_warn(&ta_ctx->sep_used->pdev->dev,
2482 "sep_hash_digest take sep failed\n");
2483 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2484 return;
2485 }
2486 /* now we sit and wait up to a fixed time for completion */
2487 end_time = jiffies + (WAIT_TIME * HZ);
2488 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2489 schedule();
2491 /* Done waiting; still not done yet? */
2492 if (are_we_done_yet == 0) {
2493 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2494 "hash digest job never got done\n");
2495 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2496 return;
2497 }
2498 }
2501 /**
2502 * This is what is called by each of the APIs provided
2503 * in the kernel crypto descriptors. It is run in a process
2504 * context using the kernel workqueues. Therefore it can
2505 * be put to sleep.
2506 */
2507 static void sep_dequeuer(void *data)
2508 {
2509 struct crypto_queue *this_queue;
2510 struct crypto_async_request *async_req;
2511 struct crypto_async_request *backlog;
2512 struct ablkcipher_request *cypher_req;
2513 struct ahash_request *hash_req;
2514 struct sep_system_ctx *sctx;
2515 struct crypto_ahash *hash_tfm;
2516 struct this_task_ctx *ta_ctx;
2519 this_queue = (struct crypto_queue *)data;
2521 spin_lock_irq(&queue_lock);
2522 backlog = crypto_get_backlog(this_queue);
2523 async_req = crypto_dequeue_request(this_queue);
2524 spin_unlock_irq(&queue_lock);
2527 pr_debug("sep crypto queue is empty\n");
2532 pr_debug("sep crypto backlog set\n");
2533 if (backlog->complete)
2534 backlog->complete(backlog, -EINPROGRESS);
2538 if (!async_req->tfm) {
2539 pr_debug("sep crypto queue null tfm\n");
2540 return;
2541 }
2543 if (!async_req->tfm->__crt_alg) {
2544 pr_debug("sep crypto queue null __crt_alg\n");
2545 return;
2546 }
2548 if (!async_req->tfm->__crt_alg->cra_type) {
2549 pr_debug("sep crypto queue null cra_type\n");
2550 return;
2551 }
2553 /* we have stuff in the queue */
2554 if (async_req->tfm->__crt_alg->cra_type !=
2555 &crypto_ahash_type) {
2556 /* This is for a cypher */
2557 pr_debug("sep crypto queue doing cipher\n");
2558 cypher_req = container_of(async_req,
2559 struct ablkcipher_request,
2560 base);
2561 if (!cypher_req) {
2562 pr_debug("sep crypto queue null cypher_req\n");
2563 return;
2564 }
2566 sep_crypto_block((void *)cypher_req);
2568 } else {
2569 /* This is a hash */
2570 pr_debug("sep crypto queue doing hash\n");
2571 /*
2572 * This is a bit more complex than cipher; we
2573 * need to figure out what type of operation
2574 */
2575 hash_req = ahash_request_cast(async_req);
2576 if (!hash_req) {
2577 pr_debug("sep crypto queue null hash_req\n");
2578 return;
2579 }
2581 hash_tfm = crypto_ahash_reqtfm(hash_req);
2582 if (!hash_tfm) {
2583 pr_debug("sep crypto queue null hash_tfm\n");
2584 return;
2585 }
2588 sctx = crypto_ahash_ctx(hash_tfm);
2589 if (!sctx) {
2590 pr_debug("sep crypto queue null sctx\n");
2591 return;
2592 }
2594 ta_ctx = ahash_request_ctx(hash_req);
2596 if (ta_ctx->current_hash_stage == HASH_INIT) {
2597 pr_debug("sep crypto queue hash init\n");
2598 sep_hash_init((void *)hash_req);
2600 } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2601 pr_debug("sep crypto queue hash update\n");
2602 sep_hash_update((void *)hash_req);
2604 } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2605 pr_debug("sep crypto queue hash final\n");
2606 sep_hash_final((void *)hash_req);
2608 } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2609 pr_debug("sep crypto queue hash digest\n");
2610 sep_hash_digest((void *)hash_req);
2612 } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2613 pr_debug("sep crypto queue hash digest\n");
2614 sep_hash_update((void *)hash_req);
2616 } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2617 pr_debug("sep crypto queue hash digest\n");
2618 sep_hash_final((void *)hash_req);
2621 pr_debug("sep crypto queue hash oops nothing\n");
2627 static int sep_sha1_init(struct ahash_request *req)
2628 {
2629 int error;
2630 int error1;
2631 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2633 pr_debug("sep - doing sha1 init\n");
2635 /* Clear out task context */
2636 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2638 ta_ctx->sep_used = sep_dev;
2639 ta_ctx->current_request = SHA1;
2640 ta_ctx->current_hash_req = req;
2641 ta_ctx->current_cypher_req = NULL;
2642 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2643 ta_ctx->current_hash_stage = HASH_INIT;
2645 /* lock necessary so that only one entity touches the queues */
2646 spin_lock_irq(&queue_lock);
2647 error = crypto_enqueue_request(&sep_queue, &req->base);
2649 if ((error != 0) && (error != -EINPROGRESS))
2650 pr_debug(" sep - crypto enqueue failed: %x\n",
2652 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2653 sep_dequeuer, (void *)&sep_queue);
2655 pr_debug(" sep - workqueue submit failed: %x\n",
2657 spin_unlock_irq(&queue_lock);
2658 /* We return result of crypto enqueue */
2662 static int sep_sha1_update(struct ahash_request *req)
2663 {
2664 int error;
2665 int error1;
2666 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2668 pr_debug("sep - doing sha1 update\n");
2670 ta_ctx->sep_used = sep_dev;
2671 ta_ctx->current_request = SHA1;
2672 ta_ctx->current_hash_req = req;
2673 ta_ctx->current_cypher_req = NULL;
2674 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2675 ta_ctx->current_hash_stage = HASH_UPDATE;
2677 /* lock necessary so that only one entity touches the queues */
2678 spin_lock_irq(&queue_lock);
2679 error = crypto_enqueue_request(&sep_queue, &req->base);
2681 if ((error != 0) && (error != -EINPROGRESS))
2682 pr_debug(" sep - crypto enqueue failed: %x\n",
2684 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2685 sep_dequeuer, (void *)&sep_queue);
2687 pr_debug(" sep - workqueue submit failed: %x\n",
2689 spin_unlock_irq(&queue_lock);
2690 /* We return result of crypto enqueue */
2694 static int sep_sha1_final(struct ahash_request *req)
2695 {
2696 int error;
2697 int error1;
2698 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2699 pr_debug("sep - doing sha1 final\n");
2701 ta_ctx->sep_used = sep_dev;
2702 ta_ctx->current_request = SHA1;
2703 ta_ctx->current_hash_req = req;
2704 ta_ctx->current_cypher_req = NULL;
2705 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2706 ta_ctx->current_hash_stage = HASH_FINISH;
2708 /* lock necessary so that only one entity touches the queues */
2709 spin_lock_irq(&queue_lock);
2710 error = crypto_enqueue_request(&sep_queue, &req->base);
2712 if ((error != 0) && (error != -EINPROGRESS))
2713 pr_debug(" sep - crypto enqueue failed: %x\n",
2715 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2716 sep_dequeuer, (void *)&sep_queue);
2718 pr_debug(" sep - workqueue submit failed: %x\n",
2720 spin_unlock_irq(&queue_lock);
2721 /* We return result of crypto enqueue */
2725 static int sep_sha1_digest(struct ahash_request *req)
2726 {
2727 int error;
2728 int error1;
2729 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2730 pr_debug("sep - doing sha1 digest\n");
2732 /* Clear out task context */
2733 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2735 ta_ctx->sep_used = sep_dev;
2736 ta_ctx->current_request = SHA1;
2737 ta_ctx->current_hash_req = req;
2738 ta_ctx->current_cypher_req = NULL;
2739 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2740 ta_ctx->current_hash_stage = HASH_DIGEST;
2742 /* lock necessary so that only one entity touches the queues */
2743 spin_lock_irq(&queue_lock);
2744 error = crypto_enqueue_request(&sep_queue, &req->base);
2746 if ((error != 0) && (error != -EINPROGRESS))
2747 pr_debug(" sep - crypto enqueue failed: %x\n",
2749 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2750 sep_dequeuer, (void *)&sep_queue);
2752 pr_debug(" sep - workqueue submit failed: %x\n",
2754 spin_unlock_irq(&queue_lock);
2755 /* We return result of crypto enqueue */
2759 static int sep_sha1_finup(struct ahash_request *req)
2760 {
2761 int error;
2762 int error1;
2763 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2764 pr_debug("sep - doing sha1 finup\n");
2766 ta_ctx->sep_used = sep_dev;
2767 ta_ctx->current_request = SHA1;
2768 ta_ctx->current_hash_req = req;
2769 ta_ctx->current_cypher_req = NULL;
2770 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2771 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2773 /* lock necessary so that only one entity touches the queues */
2774 spin_lock_irq(&queue_lock);
2775 error = crypto_enqueue_request(&sep_queue, &req->base);
2777 if ((error != 0) && (error != -EINPROGRESS))
2778 pr_debug(" sep - crypto enqueue failed: %x\n",
2780 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2781 sep_dequeuer, (void *)&sep_queue);
2783 pr_debug(" sep - workqueue submit failed: %x\n",
2785 spin_unlock_irq(&queue_lock);
2786 /* We return result of crypto enqueue */
2790 static int sep_md5_init(struct ahash_request *req)
2791 {
2792 int error;
2793 int error1;
2794 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2795 pr_debug("sep - doing md5 init\n");
2797 /* Clear out task context */
2798 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2800 ta_ctx->sep_used = sep_dev;
2801 ta_ctx->current_request = MD5;
2802 ta_ctx->current_hash_req = req;
2803 ta_ctx->current_cypher_req = NULL;
2804 ta_ctx->hash_opmode = SEP_HASH_MD5;
2805 ta_ctx->current_hash_stage = HASH_INIT;
2807 /* lock necessary so that only one entity touches the queues */
2808 spin_lock_irq(&queue_lock);
2809 error = crypto_enqueue_request(&sep_queue, &req->base);
2811 if ((error != 0) && (error != -EINPROGRESS))
2812 pr_debug(" sep - crypto enqueue failed: %x\n",
2814 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2815 sep_dequeuer, (void *)&sep_queue);
2817 pr_debug(" sep - workqueue submit failed: %x\n",
2819 spin_unlock_irq(&queue_lock);
2820 /* We return result of crypto enqueue */
2824 static int sep_md5_update(struct ahash_request *req)
2825 {
2826 int error;
2827 int error1;
2828 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2829 pr_debug("sep - doing md5 update\n");
2831 ta_ctx->sep_used = sep_dev;
2832 ta_ctx->current_request = MD5;
2833 ta_ctx->current_hash_req = req;
2834 ta_ctx->current_cypher_req = NULL;
2835 ta_ctx->hash_opmode = SEP_HASH_MD5;
2836 ta_ctx->current_hash_stage = HASH_UPDATE;
2838 /* lock necessary so that only one entity touches the queues */
2839 spin_lock_irq(&queue_lock);
2840 error = crypto_enqueue_request(&sep_queue, &req->base);
2842 if ((error != 0) && (error != -EINPROGRESS))
2843 pr_debug(" sep - crypto enqueue failed: %x\n",
2845 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2846 sep_dequeuer, (void *)&sep_queue);
2848 pr_debug(" sep - workqueue submit failed: %x\n",
2850 spin_unlock_irq(&queue_lock);
2851 /* We return result of crypto enqueue */
2855 static int sep_md5_final(struct ahash_request *req)
2856 {
2857 int error;
2858 int error1;
2859 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2860 pr_debug("sep - doing md5 final\n");
2862 ta_ctx->sep_used = sep_dev;
2863 ta_ctx->current_request = MD5;
2864 ta_ctx->current_hash_req = req;
2865 ta_ctx->current_cypher_req = NULL;
2866 ta_ctx->hash_opmode = SEP_HASH_MD5;
2867 ta_ctx->current_hash_stage = HASH_FINISH;
2869 /* lock necessary so that only one entity touches the queues */
2870 spin_lock_irq(&queue_lock);
2871 error = crypto_enqueue_request(&sep_queue, &req->base);
2873 if ((error != 0) && (error != -EINPROGRESS))
2874 pr_debug(" sep - crypto enqueue failed: %x\n",
2876 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2877 sep_dequeuer, (void *)&sep_queue);
2879 pr_debug(" sep - workqueue submit failed: %x\n",
2881 spin_unlock_irq(&queue_lock);
2882 /* We return result of crypto enqueue */
2886 static int sep_md5_digest(struct ahash_request *req)
2887 {
2888 int error;
2889 int error1;
2890 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2892 pr_debug("sep - doing md5 digest\n");
2894 /* Clear out task context */
2895 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2897 ta_ctx->sep_used = sep_dev;
2898 ta_ctx->current_request = MD5;
2899 ta_ctx->current_hash_req = req;
2900 ta_ctx->current_cypher_req = NULL;
2901 ta_ctx->hash_opmode = SEP_HASH_MD5;
2902 ta_ctx->current_hash_stage = HASH_DIGEST;
2904 /* lock necessary so that only one entity touches the queues */
2905 spin_lock_irq(&queue_lock);
2906 error = crypto_enqueue_request(&sep_queue, &req->base);
2908 if ((error != 0) && (error != -EINPROGRESS))
2909 pr_debug(" sep - crypto enqueue failed: %x\n",
2911 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2912 sep_dequeuer, (void *)&sep_queue);
2914 pr_debug(" sep - workqueue submit failed: %x\n",
2916 spin_unlock_irq(&queue_lock);
2917 /* We return result of crypto enqueue */
2921 static int sep_md5_finup(struct ahash_request *req)
2922 {
2923 int error;
2924 int error1;
2925 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2927 pr_debug("sep - doing md5 finup\n");
2929 ta_ctx->sep_used = sep_dev;
2930 ta_ctx->current_request = MD5;
2931 ta_ctx->current_hash_req = req;
2932 ta_ctx->current_cypher_req = NULL;
2933 ta_ctx->hash_opmode = SEP_HASH_MD5;
2934 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2936 /* lock necessary so that only one entity touches the queues */
2937 spin_lock_irq(&queue_lock);
2938 error = crypto_enqueue_request(&sep_queue, &req->base);
2940 if ((error != 0) && (error != -EINPROGRESS))
2941 pr_debug(" sep - crypto enqueue failed: %x\n",
2943 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2944 sep_dequeuer, (void *)&sep_queue);
2946 pr_debug(" sep - workqueue submit failed: %x\n",
2948 spin_unlock_irq(&queue_lock);
2949 /* We return result of crypto enqueue */
2953 static int sep_sha224_init(struct ahash_request *req)
2954 {
2955 int error;
2956 int error1;
2957 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2958 pr_debug("sep - doing sha224 init\n");
2960 /* Clear out task context */
2961 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2963 ta_ctx->sep_used = sep_dev;
2964 ta_ctx->current_request = SHA224;
2965 ta_ctx->current_hash_req = req;
2966 ta_ctx->current_cypher_req = NULL;
2967 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2968 ta_ctx->current_hash_stage = HASH_INIT;
2970 /* lock necessary so that only one entity touches the queues */
2971 spin_lock_irq(&queue_lock);
2972 error = crypto_enqueue_request(&sep_queue, &req->base);
2974 if ((error != 0) && (error != -EINPROGRESS))
2975 pr_debug(" sep - crypto enqueue failed: %x\n",
2977 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2978 sep_dequeuer, (void *)&sep_queue);
2980 pr_debug(" sep - workqueue submit failed: %x\n",
2982 spin_unlock_irq(&queue_lock);
2983 /* We return result of crypto enqueue */
2987 static int sep_sha224_update(struct ahash_request *req)
2988 {
2989 int error;
2990 int error1;
2991 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2992 pr_debug("sep - doing sha224 update\n");
2994 ta_ctx->sep_used = sep_dev;
2995 ta_ctx->current_request = SHA224;
2996 ta_ctx->current_hash_req = req;
2997 ta_ctx->current_cypher_req = NULL;
2998 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2999 ta_ctx->current_hash_stage = HASH_UPDATE;
3001 /* lock necessary so that only one entity touches the queues */
3002 spin_lock_irq(&queue_lock);
3003 error = crypto_enqueue_request(&sep_queue, &req->base);
3005 if ((error != 0) && (error != -EINPROGRESS))
3006 pr_debug(" sep - crypto enqueue failed: %x\n",
3008 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3009 sep_dequeuer, (void *)&sep_queue);
3011 pr_debug(" sep - workqueue submit failed: %x\n",
3013 spin_unlock_irq(&queue_lock);
3014 /* We return result of crypto enqueue */
3018 static int sep_sha224_final(struct ahash_request *req)
3019 {
3020 int error;
3021 int error1;
3022 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3023 pr_debug("sep - doing sha224 final\n");
3025 ta_ctx->sep_used = sep_dev;
3026 ta_ctx->current_request = SHA224;
3027 ta_ctx->current_hash_req = req;
3028 ta_ctx->current_cypher_req = NULL;
3029 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3030 ta_ctx->current_hash_stage = HASH_FINISH;
3032 /* lock necessary so that only one entity touches the queues */
3033 spin_lock_irq(&queue_lock);
3034 error = crypto_enqueue_request(&sep_queue, &req->base);
3036 if ((error != 0) && (error != -EINPROGRESS))
3037 pr_debug(" sep - crypto enqueue failed: %x\n",
3039 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3040 sep_dequeuer, (void *)&sep_queue);
3042 pr_debug(" sep - workqueue submit failed: %x\n",
3044 spin_unlock_irq(&queue_lock);
3045 /* We return result of crypto enqueue */
3049 static int sep_sha224_digest(struct ahash_request *req)
3050 {
3051 int error;
3052 int error1;
3053 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3055 pr_debug("sep - doing sha224 digest\n");
3057 /* Clear out task context */
3058 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3060 ta_ctx->sep_used = sep_dev;
3061 ta_ctx->current_request = SHA224;
3062 ta_ctx->current_hash_req = req;
3063 ta_ctx->current_cypher_req = NULL;
3064 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3065 ta_ctx->current_hash_stage = HASH_DIGEST;
3067 /* lock necessary so that only one entity touches the queues */
3068 spin_lock_irq(&queue_lock);
3069 error = crypto_enqueue_request(&sep_queue, &req->base);
3071 if ((error != 0) && (error != -EINPROGRESS))
3072 pr_debug(" sep - crypto enqueue failed: %x\n",
3074 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3075 sep_dequeuer, (void *)&sep_queue);
3077 pr_debug(" sep - workqueue submit failed: %x\n",
3079 spin_unlock_irq(&queue_lock);
3080 /* We return result of crypto enqueue */
3084 static int sep_sha224_finup(struct ahash_request *req)
3085 {
3086 int error;
3087 int error1;
3088 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3090 pr_debug("sep - doing sha224 finup\n");
3092 ta_ctx->sep_used = sep_dev;
3093 ta_ctx->current_request = SHA224;
3094 ta_ctx->current_hash_req = req;
3095 ta_ctx->current_cypher_req = NULL;
3096 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3097 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3099 /* lock necessary so that only one entity touches the queues */
3100 spin_lock_irq(&queue_lock);
3101 error = crypto_enqueue_request(&sep_queue, &req->base);
3103 if ((error != 0) && (error != -EINPROGRESS))
3104 pr_debug(" sep - crypto enqueue failed: %x\n",
3106 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3107 sep_dequeuer, (void *)&sep_queue);
3109 pr_debug(" sep - workqueue submit failed: %x\n",
3111 spin_unlock_irq(&queue_lock);
3112 /* We return result of crypto enqueue */
3116 static int sep_sha256_init(struct ahash_request *req)
3117 {
3118 int error;
3119 int error1;
3120 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3121 pr_debug("sep - doing sha256 init\n");
3123 /* Clear out task context */
3124 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3126 ta_ctx->sep_used = sep_dev;
3127 ta_ctx->current_request = SHA256;
3128 ta_ctx->current_hash_req = req;
3129 ta_ctx->current_cypher_req = NULL;
3130 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3131 ta_ctx->current_hash_stage = HASH_INIT;
3133 /* lock necessary so that only one entity touches the queues */
3134 spin_lock_irq(&queue_lock);
3135 error = crypto_enqueue_request(&sep_queue, &req->base);
3137 if ((error != 0) && (error != -EINPROGRESS))
3138 pr_debug(" sep - crypto enqueue failed: %x\n",
3140 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3141 sep_dequeuer, (void *)&sep_queue);
3143 pr_debug(" sep - workqueue submit failed: %x\n",
3145 spin_unlock_irq(&queue_lock);
3146 /* We return result of crypto enqueue */
3150 static int sep_sha256_update(struct ahash_request *req)
3151 {
3152 int error;
3153 int error1;
3154 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3155 pr_debug("sep - doing sha256 update\n");
3157 ta_ctx->sep_used = sep_dev;
3158 ta_ctx->current_request = SHA256;
3159 ta_ctx->current_hash_req = req;
3160 ta_ctx->current_cypher_req = NULL;
3161 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3162 ta_ctx->current_hash_stage = HASH_UPDATE;
3164 /* lock necessary so that only one entity touches the queues */
3165 spin_lock_irq(&queue_lock);
3166 error = crypto_enqueue_request(&sep_queue, &req->base);
3168 if ((error != 0) && (error != -EINPROGRESS))
3169 pr_debug(" sep - crypto enqueue failed: %x\n",
3171 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3172 sep_dequeuer, (void *)&sep_queue);
3174 pr_debug(" sep - workqueue submit failed: %x\n",
3176 spin_unlock_irq(&queue_lock);
3177 /* We return result of crypto enqueue */
3181 static int sep_sha256_final(struct ahash_request *req)
3182 {
3183 int error;
3184 int error1;
3185 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3186 pr_debug("sep - doing sha256 final\n");
3188 ta_ctx->sep_used = sep_dev;
3189 ta_ctx->current_request = SHA256;
3190 ta_ctx->current_hash_req = req;
3191 ta_ctx->current_cypher_req = NULL;
3192 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3193 ta_ctx->current_hash_stage = HASH_FINISH;
3195 /* lock necessary so that only one entity touches the queues */
3196 spin_lock_irq(&queue_lock);
3197 error = crypto_enqueue_request(&sep_queue, &req->base);
3199 if ((error != 0) && (error != -EINPROGRESS))
3200 pr_debug(" sep - crypto enqueue failed: %x\n",
3202 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3203 sep_dequeuer, (void *)&sep_queue);
3205 pr_debug(" sep - workqueue submit failed: %x\n",
3207 spin_unlock_irq(&queue_lock);
3208 /* We return result of crypto enqueue */
3212 static int sep_sha256_digest(struct ahash_request *req)
3213 {
3214 int error;
3215 int error1;
3216 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3218 pr_debug("sep - doing sha256 digest\n");
3220 /* Clear out task context */
3221 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3223 ta_ctx->sep_used = sep_dev;
3224 ta_ctx->current_request = SHA256;
3225 ta_ctx->current_hash_req = req;
3226 ta_ctx->current_cypher_req = NULL;
3227 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3228 ta_ctx->current_hash_stage = HASH_DIGEST;
3230 /* lock necessary so that only one entity touches the queues */
3231 spin_lock_irq(&queue_lock);
3232 error = crypto_enqueue_request(&sep_queue, &req->base);
3234 if ((error != 0) && (error != -EINPROGRESS))
3235 pr_debug(" sep - crypto enqueue failed: %x\n",
3237 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3238 sep_dequeuer, (void *)&sep_queue);
3240 pr_debug(" sep - workqueue submit failed: %x\n",
3242 spin_unlock_irq(&queue_lock);
3243 /* We return result of crypto enqueue */
3247 static int sep_sha256_finup(struct ahash_request *req)
3248 {
3249 int error;
3250 int error1;
3251 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3253 pr_debug("sep - doing sha256 finup\n");
3255 ta_ctx->sep_used = sep_dev;
3256 ta_ctx->current_request = SHA256;
3257 ta_ctx->current_hash_req = req;
3258 ta_ctx->current_cypher_req = NULL;
3259 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3260 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3262 /* lock necessary so that only one entity touches the queues */
3263 spin_lock_irq(&queue_lock);
3264 error = crypto_enqueue_request(&sep_queue, &req->base);
3266 if ((error != 0) && (error != -EINPROGRESS))
3267 pr_debug(" sep - crypto enqueue failed: %x\n",
3269 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3270 sep_dequeuer, (void *)&sep_queue);
3272 pr_debug(" sep - workqueue submit failed: %x\n",
3274 spin_unlock_irq(&queue_lock);
3275 /* We return result of crypto enqueue */
3279 static int sep_crypto_init(struct crypto_tfm *tfm)
3280 {
3281 const char *alg_name = crypto_tfm_alg_name(tfm);
3283 if (alg_name == NULL)
3284 pr_debug("sep_crypto_init alg is NULL\n");
3286 pr_debug("sep_crypto_init alg is %s\n", alg_name);
3288 tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
3289 return 0;
3290 }
3292 static void sep_crypto_exit(struct crypto_tfm *tfm)
3294 pr_debug("sep_crypto_exit\n");
3297 static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3298 unsigned int keylen)
3299 {
3300 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3302 pr_debug("sep aes setkey\n");
3304 pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
3305 switch (keylen) {
3306 case SEP_AES_KEY_128_SIZE:
3307 sctx->aes_key_size = AES_128;
3308 break;
3309 case SEP_AES_KEY_192_SIZE:
3310 sctx->aes_key_size = AES_192;
3311 break;
3312 case SEP_AES_KEY_256_SIZE:
3313 sctx->aes_key_size = AES_256;
3314 break;
3315 case SEP_AES_KEY_512_SIZE:
3316 sctx->aes_key_size = AES_512;
3317 break;
3318 default:
3319 pr_debug("invalid sep aes key size %x\n",
3320 keylen);
3321 return -EINVAL;
3322 }
3324 memset(&sctx->key.aes, 0, sizeof(u32) *
3325 SEP_AES_MAX_KEY_SIZE_WORDS);
3326 memcpy(&sctx->key.aes, key, keylen);
3327 sctx->keylen = keylen;
3328 /* Indicate to encrypt/decrypt function to send key to SEP */
3329 sctx->key_sent = 0;
3331 return 0;
3332 }
3334 static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3335 {
3336 int error;
3337 int error1;
3338 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3340 pr_debug("sep - doing aes ecb encrypt\n");
3342 /* Clear out task context */
3343 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3345 ta_ctx->sep_used = sep_dev;
3346 ta_ctx->current_request = AES_ECB;
3347 ta_ctx->current_hash_req = NULL;
3348 ta_ctx->current_cypher_req = req;
3349 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3350 ta_ctx->aes_opmode = SEP_AES_ECB;
3351 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3352 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3354 /* lock necessary so that only one entity touches the queues */
3355 spin_lock_irq(&queue_lock);
3356 error = crypto_enqueue_request(&sep_queue, &req->base);
3358 if ((error != 0) && (error != -EINPROGRESS))
3359 pr_debug(" sep - crypto enqueue failed: %x\n",
3361 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3362 sep_dequeuer, (void *)&sep_queue);
3364 pr_debug(" sep - workqueue submit failed: %x\n",
3366 spin_unlock_irq(&queue_lock);
3367 /* We return result of crypto enqueue */
3371 static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3372 {
3373 int error;
3374 int error1;
3375 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3377 pr_debug("sep - doing aes ecb decrypt\n");
3379 /* Clear out task context */
3380 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3382 ta_ctx->sep_used = sep_dev;
3383 ta_ctx->current_request = AES_ECB;
3384 ta_ctx->current_hash_req = NULL;
3385 ta_ctx->current_cypher_req = req;
3386 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3387 ta_ctx->aes_opmode = SEP_AES_ECB;
3388 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3389 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3391 /* lock necessary so that only one entity touches the queues */
3392 spin_lock_irq(&queue_lock);
3393 error = crypto_enqueue_request(&sep_queue, &req->base);
3395 if ((error != 0) && (error != -EINPROGRESS))
3396 pr_debug(" sep - crypto enqueue failed: %x\n",
3398 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3399 sep_dequeuer, (void *)&sep_queue);
3401 pr_debug(" sep - workqueue submit failed: %x\n",
3403 spin_unlock_irq(&queue_lock);
3404 /* We return result of crypto enqueue */
3408 static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3409 {
3410 int error;
3411 int error1;
3412 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3413 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3414 crypto_ablkcipher_reqtfm(req));
3416 pr_debug("sep - doing aes cbc encrypt\n");
3418 /* Clear out task context */
3419 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3421 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3422 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3424 ta_ctx->sep_used = sep_dev;
3425 ta_ctx->current_request = AES_CBC;
3426 ta_ctx->current_hash_req = NULL;
3427 ta_ctx->current_cypher_req = req;
3428 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3429 ta_ctx->aes_opmode = SEP_AES_CBC;
3430 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3431 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3433 /* lock necessary so that only one entity touches the queues */
3434 spin_lock_irq(&queue_lock);
3435 error = crypto_enqueue_request(&sep_queue, &req->base);
3437 if ((error != 0) && (error != -EINPROGRESS))
3438 pr_debug(" sep - crypto enqueue failed: %x\n",
3440 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3441 sep_dequeuer, (void *)&sep_queue);
3443 pr_debug(" sep - workqueue submit failed: %x\n",
3445 spin_unlock_irq(&queue_lock);
3446 /* We return result of crypto enqueue */
3450 static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3451 {
3452 int error;
3453 int error1;
3454 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3455 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3456 crypto_ablkcipher_reqtfm(req));
3458 pr_debug("sep - doing aes cbc decrypt\n");
3460 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3461 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3463 /* Clear out task context */
3464 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3466 ta_ctx->sep_used = sep_dev;
3467 ta_ctx->current_request = AES_CBC;
3468 ta_ctx->current_hash_req = NULL;
3469 ta_ctx->current_cypher_req = req;
3470 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3471 ta_ctx->aes_opmode = SEP_AES_CBC;
3472 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3473 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3475 /* lock necessary so that only one entity touches the queues */
3476 spin_lock_irq(&queue_lock);
3477 error = crypto_enqueue_request(&sep_queue, &req->base);
3479 if ((error != 0) && (error != -EINPROGRESS))
3480 pr_debug(" sep - crypto enqueue failed: %x\n",
3482 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3483 sep_dequeuer, (void *)&sep_queue);
3485 pr_debug(" sep - workqueue submit failed: %x\n",
3487 spin_unlock_irq(&queue_lock);
3488 /* We return result of crypto enqueue */
3492 static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3493 unsigned int keylen)
3494 {
3495 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3496 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
3497 u32 *flags = &ctfm->crt_flags;
3499 pr_debug("sep des setkey\n");
3501 switch (keylen) {
3502 case DES_KEY_SIZE:
3503 sctx->des_nbr_keys = DES_KEY_1;
3504 break;
3505 case DES_KEY_SIZE * 2:
3506 sctx->des_nbr_keys = DES_KEY_2;
3507 break;
3508 case DES_KEY_SIZE * 3:
3509 sctx->des_nbr_keys = DES_KEY_3;
3510 break;
3511 default:
3512 pr_debug("invalid key size %x\n",
3513 keylen);
3514 return -EINVAL;
3515 }
3517 if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
3518 (sep_weak_key(key, keylen))) {
3520 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
3521 pr_debug("weak key\n");
3525 memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
3526 memcpy(&sctx->key.des.key1, key, keylen);
3527 sctx->keylen = keylen;
3528 /* Indicate to encrypt/decrypt function to send key to SEP */
3529 sctx->key_sent = 0;
3531 return 0;
3532 }
3534 static int sep_des_ebc_encrypt(struct ablkcipher_request *req)
3535 {
3536 int error;
3537 int error1;
3538 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3540 pr_debug("sep - doing des ecb encrypt\n");
3542 /* Clear out task context */
3543 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3545 ta_ctx->sep_used = sep_dev;
3546 ta_ctx->current_request = DES_ECB;
3547 ta_ctx->current_hash_req = NULL;
3548 ta_ctx->current_cypher_req = req;
3549 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3550 ta_ctx->des_opmode = SEP_DES_ECB;
3551 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3552 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3554 /* lock necessary so that only one entity touches the queues */
3555 spin_lock_irq(&queue_lock);
3556 error = crypto_enqueue_request(&sep_queue, &req->base);
3558 if ((error != 0) && (error != -EINPROGRESS))
3559 pr_debug(" sep - crypto enqueue failed: %x\n",
3561 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3562 sep_dequeuer, (void *)&sep_queue);
3564 pr_debug(" sep - workqueue submit failed: %x\n",
3566 spin_unlock_irq(&queue_lock);
3567 /* We return result of crypto enqueue */
3571 static int sep_des_ebc_decrypt(struct ablkcipher_request *req)
3572 {
3573 int error;
3574 int error1;
3575 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3577 pr_debug("sep - doing des ecb decrypt\n");
3579 /* Clear out task context */
3580 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3582 ta_ctx->sep_used = sep_dev;
3583 ta_ctx->current_request = DES_ECB;
3584 ta_ctx->current_hash_req = NULL;
3585 ta_ctx->current_cypher_req = req;
3586 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3587 ta_ctx->des_opmode = SEP_DES_ECB;
3588 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3589 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3591 /* lock necessary so that only one entity touches the queues */
3592 spin_lock_irq(&queue_lock);
3593 error = crypto_enqueue_request(&sep_queue, &req->base);
3595 if ((error != 0) && (error != -EINPROGRESS))
3596 pr_debug(" sep - crypto enqueue failed: %x\n",
3598 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3599 sep_dequeuer, (void *)&sep_queue);
3601 pr_debug(" sep - workqueue submit failed: %x\n",
3603 spin_unlock_irq(&queue_lock);
3604 /* We return result of crypto enqueue */
3608 static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
3609 {
3610 int error;
3611 int error1;
3612 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3614 pr_debug("sep - doing des cbc encrypt\n");
3616 /* Clear out task context */
3617 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3619 ta_ctx->sep_used = sep_dev;
3620 ta_ctx->current_request = DES_CBC;
3621 ta_ctx->current_hash_req = NULL;
3622 ta_ctx->current_cypher_req = req;
3623 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3624 ta_ctx->des_opmode = SEP_DES_CBC;
3625 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3626 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3628 /* lock necessary so that only one entity touches the queues */
3629 spin_lock_irq(&queue_lock);
3630 error = crypto_enqueue_request(&sep_queue, &req->base);
3632 if ((error != 0) && (error != -EINPROGRESS))
3633 pr_debug(" sep - crypto enqueue failed: %x\n",
3635 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3636 sep_dequeuer, (void *)&sep_queue);
3638 pr_debug(" sep - workqueue submit failed: %x\n",
3640 spin_unlock_irq(&queue_lock);
3641 /* We return result of crypto enqueue */
3645 static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
3646 {
3647 int error;
3648 int error1;
3649 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3651 pr_debug("sep - doing des ecb decrypt\n");
3653 /* Clear out task context */
3654 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3656 ta_ctx->sep_used = sep_dev;
3657 ta_ctx->current_request = DES_CBC;
3658 ta_ctx->current_hash_req = NULL;
3659 ta_ctx->current_cypher_req = req;
3660 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3661 ta_ctx->des_opmode = SEP_DES_CBC;
3662 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3663 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3665 /* lock necessary so that only one entity touches the queues */
3666 spin_lock_irq(&queue_lock);
3667 error = crypto_enqueue_request(&sep_queue, &req->base);
3669 if ((error != 0) && (error != -EINPROGRESS))
3670 pr_debug(" sep - crypto enqueue failed: %x\n",
3672 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3673 sep_dequeuer, (void *)&sep_queue);
3675 pr_debug(" sep - workqueue submit failed: %x\n",
3677 spin_unlock_irq(&queue_lock);
3678 /* We return result of crypto enqueue */
3682 static struct ahash_alg hash_algs[] = {
3683 {
3684 .init = sep_sha1_init,
3685 .update = sep_sha1_update,
3686 .final = sep_sha1_final,
3687 .digest = sep_sha1_digest,
3688 .finup = sep_sha1_finup,
3689 .halg = {
3690 .digestsize = SHA1_DIGEST_SIZE,
3691 .base = {
3692 .cra_name = "sha1",
3693 .cra_driver_name = "sha1-sep",
3694 .cra_priority = 100,
3695 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3696 CRYPTO_ALG_ASYNC,
3697 .cra_blocksize = SHA1_BLOCK_SIZE,
3698 .cra_ctxsize = sizeof(struct sep_system_ctx),
3700 .cra_module = THIS_MODULE,
3701 .cra_init = sep_hash_cra_init,
3702 .cra_exit = sep_hash_cra_exit,
3703 }
3704 }
3705 },
3706 {
3707 .init = sep_md5_init,
3708 .update = sep_md5_update,
3709 .final = sep_md5_final,
3710 .digest = sep_md5_digest,
3711 .finup = sep_md5_finup,
3712 .halg = {
3713 .digestsize = MD5_DIGEST_SIZE,
3714 .base = {
3715 .cra_name = "md5",
3716 .cra_driver_name = "md5-sep",
3717 .cra_priority = 100,
3718 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3719 CRYPTO_ALG_ASYNC,
3720 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
3721 .cra_ctxsize = sizeof(struct sep_system_ctx),
3723 .cra_module = THIS_MODULE,
3724 .cra_init = sep_hash_cra_init,
3725 .cra_exit = sep_hash_cra_exit,
3726 }
3727 }
3728 },
3729 {
3730 .init = sep_sha224_init,
3731 .update = sep_sha224_update,
3732 .final = sep_sha224_final,
3733 .digest = sep_sha224_digest,
3734 .finup = sep_sha224_finup,
3735 .halg = {
3736 .digestsize = SHA224_DIGEST_SIZE,
3737 .base = {
3738 .cra_name = "sha224",
3739 .cra_driver_name = "sha224-sep",
3740 .cra_priority = 100,
3741 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3742 CRYPTO_ALG_ASYNC,
3743 .cra_blocksize = SHA224_BLOCK_SIZE,
3744 .cra_ctxsize = sizeof(struct sep_system_ctx),
3746 .cra_module = THIS_MODULE,
3747 .cra_init = sep_hash_cra_init,
3748 .cra_exit = sep_hash_cra_exit,
3749 }
3750 }
3751 },
3752 {
3753 .init = sep_sha256_init,
3754 .update = sep_sha256_update,
3755 .final = sep_sha256_final,
3756 .digest = sep_sha256_digest,
3757 .finup = sep_sha256_finup,
3758 .halg = {
3759 .digestsize = SHA256_DIGEST_SIZE,
3760 .base = {
3761 .cra_name = "sha256",
3762 .cra_driver_name = "sha256-sep",
3763 .cra_priority = 100,
3764 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3765 CRYPTO_ALG_ASYNC,
3766 .cra_blocksize = SHA256_BLOCK_SIZE,
3767 .cra_ctxsize = sizeof(struct sep_system_ctx),
3769 .cra_module = THIS_MODULE,
3770 .cra_init = sep_hash_cra_init,
3771 .cra_exit = sep_hash_cra_exit,
3772 }
3773 }
3774 },
3775 };
3777 static struct crypto_alg crypto_algs[] = {
3779 .cra_name = "ecb(aes)",
3780 .cra_driver_name = "ecb-aes-sep",
3781 .cra_priority = 100,
3782 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3783 .cra_blocksize = AES_BLOCK_SIZE,
3784 .cra_ctxsize = sizeof(struct sep_system_ctx),
3786 .cra_type = &crypto_ablkcipher_type,
3787 .cra_module = THIS_MODULE,
3788 .cra_init = sep_crypto_init,
3789 .cra_exit = sep_crypto_exit,
3790 .cra_u.ablkcipher = {
3791 .min_keysize = AES_MIN_KEY_SIZE,
3792 .max_keysize = AES_MAX_KEY_SIZE,
3793 .setkey = sep_aes_setkey,
3794 .encrypt = sep_aes_ecb_encrypt,
3795 .decrypt = sep_aes_ecb_decrypt,
3799 .cra_name = "cbc(aes)",
3800 .cra_driver_name = "cbc-aes-sep",
3801 .cra_priority = 100,
3802 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3803 .cra_blocksize = AES_BLOCK_SIZE,
3804 .cra_ctxsize = sizeof(struct sep_system_ctx),
3806 .cra_type = &crypto_ablkcipher_type,
3807 .cra_module = THIS_MODULE,
3808 .cra_init = sep_crypto_init,
3809 .cra_exit = sep_crypto_exit,
3810 .cra_u.ablkcipher = {
3811 .min_keysize = AES_MIN_KEY_SIZE,
3812 .max_keysize = AES_MAX_KEY_SIZE,
3813 .setkey = sep_aes_setkey,
3814 .encrypt = sep_aes_cbc_encrypt,
3815 .ivsize = AES_BLOCK_SIZE,
3816 .decrypt = sep_aes_cbc_decrypt,
3820 .cra_name = "ebc(des)",
3821 .cra_driver_name = "ebc-des-sep",
3822 .cra_priority = 100,
3823 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3824 .cra_blocksize = DES_BLOCK_SIZE,
3825 .cra_ctxsize = sizeof(struct sep_system_ctx),
3827 .cra_type = &crypto_ablkcipher_type,
3828 .cra_module = THIS_MODULE,
3829 .cra_init = sep_crypto_init,
3830 .cra_exit = sep_crypto_exit,
3831 .cra_u.ablkcipher = {
3832 .min_keysize = DES_KEY_SIZE,
3833 .max_keysize = DES_KEY_SIZE,
3834 .setkey = sep_des_setkey,
3835 .encrypt = sep_des_ebc_encrypt,
3836 .decrypt = sep_des_ebc_decrypt,
3840 .cra_name = "cbc(des)",
3841 .cra_driver_name = "cbc-des-sep",
3842 .cra_priority = 100,
3843 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3844 .cra_blocksize = DES_BLOCK_SIZE,
3845 .cra_ctxsize = sizeof(struct sep_system_ctx),
3847 .cra_type = &crypto_ablkcipher_type,
3848 .cra_module = THIS_MODULE,
3849 .cra_init = sep_crypto_init,
3850 .cra_exit = sep_crypto_exit,
3851 .cra_u.ablkcipher = {
3852 .min_keysize = DES_KEY_SIZE,
3853 .max_keysize = DES_KEY_SIZE,
3854 .setkey = sep_des_setkey,
3855 .encrypt = sep_des_cbc_encrypt,
3856 .ivsize = DES_BLOCK_SIZE,
3857 .decrypt = sep_des_cbc_decrypt,
3861 .cra_name = "ebc(des3-ede)",
3862 .cra_driver_name = "ebc-des3-ede-sep",
3863 .cra_priority = 100,
3864 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3865 .cra_blocksize = DES_BLOCK_SIZE,
3866 .cra_ctxsize = sizeof(struct sep_system_ctx),
3868 .cra_type = &crypto_ablkcipher_type,
3869 .cra_module = THIS_MODULE,
3870 .cra_init = sep_crypto_init,
3871 .cra_exit = sep_crypto_exit,
3872 .cra_u.ablkcipher = {
3873 .min_keysize = DES3_EDE_KEY_SIZE,
3874 .max_keysize = DES3_EDE_KEY_SIZE,
3875 .setkey = sep_des_setkey,
3876 .encrypt = sep_des_ebc_encrypt,
3877 .decrypt = sep_des_ebc_decrypt,
3881 .cra_name = "cbc(des3-ede)",
3882 .cra_driver_name = "cbc-des3--ede-sep",
3883 .cra_priority = 100,
3884 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3885 .cra_blocksize = DES_BLOCK_SIZE,
3886 .cra_ctxsize = sizeof(struct sep_system_ctx),
3888 .cra_type = &crypto_ablkcipher_type,
3889 .cra_module = THIS_MODULE,
3890 .cra_init = sep_crypto_init,
3891 .cra_exit = sep_crypto_exit,
3892 .cra_u.ablkcipher = {
3893 .min_keysize = DES3_EDE_KEY_SIZE,
3894 .max_keysize = DES3_EDE_KEY_SIZE,
3895 .setkey = sep_des_setkey,
3896 .encrypt = sep_des_cbc_encrypt,
3897 .decrypt = sep_des_cbc_decrypt,
3898 .ivsize = DES_BLOCK_SIZE,
3899 }
3900 }
3901 };
3902 int sep_crypto_setup(void)
3903 {
3904 int err, i, j, k;
3906 tasklet_init(&sep_dev->finish_tasklet, sep_finish,
3907 (unsigned long)sep_dev);
3909 crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
3911 sep_dev->workqueue = create_singlethread_workqueue(
3912 "sep_crypto_workqueue");
3913 if (!sep_dev->workqueue) {
3914 dev_warn(&sep_dev->pdev->dev, "can't create workqueue\n");
3915 return -ENOMEM;
3916 }
3918 spin_lock_init(&queue_lock);
3921 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
3922 err = crypto_register_ahash(&hash_algs[i]);
3923 if (err)
3924 goto err_algs;
3925 }
3928 for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
3929 err = crypto_register_alg(&crypto_algs[j]);
3930 if (err)
3931 goto err_crypto_algs;
3932 }
3934 return err;
3936 err_algs:
3937 for (k = 0; k < i; k++)
3938 crypto_unregister_ahash(&hash_algs[k]);
3939 destroy_workqueue(sep_dev->workqueue);
3940 return err;
3942 err_crypto_algs:
3943 for (k = 0; k < j; k++)
3944 crypto_unregister_alg(&crypto_algs[k]);
3945 goto err_algs;
3946 }
3948 void sep_crypto_takedown(void)
3949 {
3950 int i;
3953 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
3954 crypto_unregister_ahash(&hash_algs[i]);
3955 for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
3956 crypto_unregister_alg(&crypto_algs[i]);
3958 destroy_workqueue(sep_dev->workqueue);
3959 tasklet_kill(&sep_dev->finish_tasklet);
3960 }
3962 #endif