3 * sep_crypto.c - Crypto interface structures
5 * Copyright(c) 2009-2011 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009-2010 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
30 * 2011.02.22 Enable Kernel Crypto
35 #include <linux/module.h>
36 #include <linux/miscdevice.h>
38 #include <linux/cdev.h>
39 #include <linux/kdev_t.h>
40 #include <linux/mutex.h>
41 #include <linux/sched.h>
43 #include <linux/poll.h>
44 #include <linux/wait.h>
45 #include <linux/pci.h>
46 #include <linux/pm_runtime.h>
47 #include <linux/err.h>
48 #include <linux/device.h>
49 #include <linux/errno.h>
50 #include <linux/interrupt.h>
51 #include <linux/kernel.h>
52 #include <linux/clk.h>
53 #include <linux/irq.h>
55 #include <linux/platform_device.h>
56 #include <linux/list.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/delay.h>
59 #include <linux/jiffies.h>
60 #include <linux/workqueue.h>
61 #include <linux/crypto.h>
62 #include <crypto/internal/hash.h>
63 #include <crypto/scatterwalk.h>
64 #include <crypto/sha.h>
65 #include <crypto/md5.h>
66 #include <crypto/aes.h>
67 #include <crypto/des.h>
68 #include <crypto/hash.h>
69 #include "sep_driver_hw_defs.h"
70 #include "sep_driver_config.h"
71 #include "sep_driver_api.h"
73 #include "sep_crypto.h"
75 #if defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE)
77 /* Globals for queuing */
78 static spinlock_t queue_lock;
79 static struct crypto_queue sep_queue;
81 /* Declare of dequeuer */
82 static void sep_dequeuer(void *data);
87 * @work: pointer to work_struct
88 * This is what is called by the queue; it is generic so that it
89 * can be used by any type of operation as each different callback
90 * function can use the data parameter in its own way
92 static void sep_do_callback(struct work_struct *work)
94 struct sep_work_struct *sep_work = container_of(work,
95 struct sep_work_struct, work);
96 if (sep_work != NULL) {
97 (sep_work->callback)(sep_work->data);
100 pr_debug("sep crypto: do callback - NULL container\n");
106 * @work_queue: pointer to struct_workqueue
107 * @funct: pointer to function to execute
108 * @data: pointer to data; function will know
110 * This is a generic API to submit something to
111 * the queue. The callback function will depend
112 * on what operation is to be done
114 static int sep_submit_work(struct workqueue_struct *work_queue,
115 void(*funct)(void *),
118 struct sep_work_struct *sep_work;
121 sep_work = kmalloc(sizeof(struct sep_work_struct), GFP_ATOMIC);
123 if (sep_work == NULL) {
124 pr_debug("sep crypto: can't allocate work structure\n");
128 sep_work->callback = funct;
129 sep_work->data = data;
130 INIT_WORK(&sep_work->work, sep_do_callback);
131 result = queue_work(work_queue, &sep_work->work);
133 pr_debug("sep_crypto: queue_work failed\n");
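/*
 * Illustrative use (an assumption for exposition; it mirrors the finup
 * resubmit path later in this file): scheduling the dequeuer on the
 * driver workqueue would look like
 *
 *	u32 rc = sep_submit_work(ta_ctx->sep_used->workqueue,
 *		sep_dequeuer, (void *)&sep_queue);
 *	if (rc)
 *		pr_debug("sep_crypto: could not submit dequeuer\n");
 */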
141 * @sep: pointer to struct sep_device
142 * @size: total size of area
143 * @block_size: minimum size of chunks
144 * each page is minimum or modulo this size
145 * @returns: pointer to struct scatterlist for new
148 static struct scatterlist *sep_alloc_sg_buf(
149 struct sep_device *sep,
157 size_t real_page_size;
159 struct scatterlist *sg, *sg_temp;
164 dev_dbg(&sep->pdev->dev, "sep alloc sg buf\n");
168 real_page_size = PAGE_SIZE - (PAGE_SIZE % block_size);
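/*
 * Worked example: with PAGE_SIZE 4096 and an AES block size of 16,
 * 4096 % 16 == 0, so real_page_size stays 4096; a hypothetical
 * 24-byte block size would give 4096 - (4096 % 24) = 4080, the
 * largest multiple of the block size that fits in one page.
 */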
170 * The size of each page must be a multiple of the operation
171 * block size; increment by the adjusted page size until
172 * the total size is reached, then you have the number of
175 while (current_size < size) {
176 current_size += real_page_size;
180 sg = kmalloc_array(nbr_pages, sizeof(struct scatterlist), GFP_ATOMIC);
184 sg_init_table(sg, nbr_pages);
188 for (ct1 = 0; ct1 < nbr_pages; ct1 += 1) {
189 buf = (void *)get_zeroed_page(GFP_ATOMIC);
191 dev_warn(&sep->pdev->dev,
192 "Cannot allocate page for new buffer\n");
197 sg_set_buf(sg_temp, buf, real_page_size);
198 if ((size - current_size) > real_page_size) {
199 sg_temp->length = real_page_size;
200 current_size += real_page_size;
202 sg_temp->length = (size - current_size);
205 sg_temp = sg_next(sg_temp);
212 * @sg: pointer to struct scatterlist; points to area to free
214 static void sep_free_sg_buf(struct scatterlist *sg)
216 struct scatterlist *sg_temp = sg;
218 free_page((unsigned long)sg_virt(sg_temp));
219 sg_temp = sg_next(sg_temp);
226 * @sep: pointer to struct sep_device
227 * @sg_src: pointer to struct scatterlist for source
228 * @sg_dst: pointer to struct scatterlist for destination
229 * @size: size (in bytes) of data to copy
231 * Copy data from one scatterlist to another; both must
234 static void sep_copy_sg(
235 struct sep_device *sep,
236 struct scatterlist *sg_src,
237 struct scatterlist *sg_dst,
241 u32 in_offset, out_offset;
244 struct scatterlist *sg_src_tmp = sg_src;
245 struct scatterlist *sg_dst_tmp = sg_dst;
249 dev_dbg(&sep->pdev->dev, "sep copy sg\n");
251 if ((sg_src == NULL) || (sg_dst == NULL) || (size == 0))
254 dev_dbg(&sep->pdev->dev, "sep copy sg not null\n");
256 while (count < size) {
257 if ((sg_src_tmp->length - in_offset) >
258 (sg_dst_tmp->length - out_offset))
259 seg_size = sg_dst_tmp->length - out_offset;
261 seg_size = sg_src_tmp->length - in_offset;
263 if (seg_size > (size - count))
264 seg_size = size - count;
266 memcpy(sg_virt(sg_dst_tmp) + out_offset,
267 sg_virt(sg_src_tmp) + in_offset,
270 in_offset += seg_size;
271 out_offset += seg_size;
274 if (in_offset >= sg_src_tmp->length) {
275 sg_src_tmp = sg_next(sg_src_tmp);
279 if (out_offset >= sg_dst_tmp->length) {
280 sg_dst_tmp = sg_next(sg_dst_tmp);
287 * sep_oddball_pages -
288 * @sep: pointer to struct sep_device
289 * @sg: pointer to struct scatterlist - buffer to check
290 * @size: total data size
291 * @block_size: minimum block size; pages must be multiples of this size
292 * @to_copy: 1 means do copy, 0 means do not copy
293 * @new_sg: pointer to location to put pointer to new sg area
294 * @returns: 1 if new scatterlist is needed; 0 if not needed;
295 * error value if operation failed
297 * The SEP device requires all pages to be multiples of the
298 * minimum block size appropriate for the operation
299 * This function checks all pages; if any are oddball-sized
300 * (not a multiple of the block size), it creates a new scatterlist.
301 * If the to_copy parameter is set to 1, then a scatter list
302 * copy is performed. The pointer to the new scatterlist is
303 * put into the address supplied by the new_sg parameter; if
304 * no new scatterlist is needed, then a NULL is put into
305 * the location at new_sg.
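 *
 * Illustrative caller pattern (an assumption for exposition; the
 * cipher and hash paths below follow this shape):
 *
 *	rc = sep_oddball_pages(sep, req->src, req->nbytes,
 *		block_size, &new_sg, 1);
 *	rc < 0  - hard error, abort the operation
 *	rc == 1 - a new scatterlist was built; use new_sg and free it
 *		with sep_free_sg_buf() when the operation completes
 *	rc == 0 - pages were already even; use req->src (new_sg is NULL)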
308 static int sep_oddball_pages(
309 struct sep_device *sep,
310 struct scatterlist *sg,
313 struct scatterlist **new_sg,
316 struct scatterlist *sg_temp;
318 u32 nbr_pages, page_count;
320 dev_dbg(&sep->pdev->dev, "sep oddball\n");
321 if ((sg == NULL) || (data_size == 0) || (data_size < block_size))
324 dev_dbg(&sep->pdev->dev, "sep oddball not null\n");
332 sg_temp = sg_next(sg_temp);
336 while ((sg_temp) && (flag == 0)) {
338 if (sg_temp->length % block_size)
341 sg_temp = sg_next(sg_temp);
344 /* Do not process if last (or only) page is oddball */
345 if (nbr_pages == page_count)
349 dev_dbg(&sep->pdev->dev, "sep oddball processing\n");
350 *new_sg = sep_alloc_sg_buf(sep, data_size, block_size);
351 if (*new_sg == NULL) {
352 dev_warn(&sep->pdev->dev, "cannot allocate new sg\n");
357 sep_copy_sg(sep, sg, *new_sg, data_size);
366 * sep_copy_offset_sg -
367 * @sep: pointer to struct sep_device;
368 * @sg: pointer to struct scatterlist
369 * @offset: offset into scatterlist memory
370 * @dst: place to put data
371 * @len: length of data
372 * @returns: number of bytes copied
374 * This copies data from scatterlist buffer
375 * offset from beginning - it is needed for
376 * handling tail data in hash
378 static size_t sep_copy_offset_sg(
379 struct sep_device *sep,
380 struct scatterlist *sg,
387 size_t offset_within_page;
388 size_t length_within_page;
389 size_t length_remaining;
390 size_t current_offset;
392 /* Find which page is beginning of segment */
394 page_end = sg->length;
395 while ((sg) && (offset > page_end)) {
396 page_start += sg->length;
399 page_end += sg->length;
405 offset_within_page = offset - page_start;
406 if ((sg->length - offset_within_page) >= len) {
407 /* All within this page */
408 memcpy(dst, sg_virt(sg) + offset_within_page, len);
411 /* Scattered multiple pages */
413 length_remaining = len;
414 while ((sg) && (current_offset < len)) {
415 length_within_page = sg->length - offset_within_page;
416 if (length_within_page >= length_remaining) {
417 memcpy(dst+current_offset,
418 sg_virt(sg) + offset_within_page,
420 length_remaining = 0;
421 current_offset = len;
423 memcpy(dst+current_offset,
424 sg_virt(sg) + offset_within_page,
426 length_remaining -= length_within_page;
427 current_offset += length_within_page;
428 offset_within_page = 0;
441 * @src_ptr: source pointer
442 * @dst_ptr: destination pointer
443 * @nbytes: number of bytes
444 * @returns: 0 for success; -1 for failure
445 * We cannot have any partial overlap. Total overlap
446 * where src is the same as dst is okay
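 * For example, src = 0x1000, dst = 0x1010 with nbytes = 0x20 is a
 * partial overlap (both ranges cover 0x1010..0x101f) and is rejected,
 * while src == dst with any nbytes passes as total overlap.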
448 static int partial_overlap(void *src_ptr, void *dst_ptr, u32 nbytes)
450 /* Check for partial overlap */
451 if (src_ptr != dst_ptr) {
452 if (src_ptr < dst_ptr) {
453 if ((src_ptr + nbytes) > dst_ptr)
456 if ((dst_ptr + nbytes) > src_ptr)
464 /* Debug - prints only if DEBUG is defined */
465 static void sep_dump_ivs(struct ablkcipher_request *req, char *reason)
469 struct sep_aes_internal_context *aes_internal;
470 struct sep_des_internal_context *des_internal;
473 struct this_task_ctx *ta_ctx;
474 struct crypto_ablkcipher *tfm;
475 struct sep_system_ctx *sctx;
477 ta_ctx = ablkcipher_request_ctx(req);
478 tfm = crypto_ablkcipher_reqtfm(req);
479 sctx = crypto_ablkcipher_ctx(tfm);
481 dev_dbg(&ta_ctx->sep_used->pdev->dev, "IV DUMP - %s\n", reason);
482 if ((ta_ctx->current_request == DES_CBC) &&
483 (ta_ctx->des_opmode == SEP_DES_CBC)) {
485 des_internal = (struct sep_des_internal_context *)
486 sctx->des_private_ctx.ctx_buf;
488 dev_dbg(&ta_ctx->sep_used->pdev->dev,
489 "sep - vendor iv for DES\n");
490 cptr = (unsigned char *)des_internal->iv_context;
491 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
492 dev_dbg(&ta_ctx->sep_used->pdev->dev,
493 "%02x\n", *(cptr + ct1));
496 dev_dbg(&ta_ctx->sep_used->pdev->dev,
497 "sep - walk from kernel crypto iv for DES\n");
498 cptr = (unsigned char *)ta_ctx->walk.iv;
499 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
500 dev_dbg(&ta_ctx->sep_used->pdev->dev,
501 "%02x\n", *(cptr + ct1));
502 } else if ((ta_ctx->current_request == AES_CBC) &&
503 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
505 aes_internal = (struct sep_aes_internal_context *)
506 sctx->aes_private_ctx.cbuff;
508 dev_dbg(&ta_ctx->sep_used->pdev->dev,
509 "sep - vendor iv for AES\n");
510 cptr = (unsigned char *)aes_internal->aes_ctx_iv;
511 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
512 dev_dbg(&ta_ctx->sep_used->pdev->dev,
513 "%02x\n", *(cptr + ct1));
516 dev_dbg(&ta_ctx->sep_used->pdev->dev,
517 "sep - walk from kernel crypto iv for AES\n");
518 cptr = (unsigned char *)ta_ctx->walk.iv;
519 for (ct1 = 0; ct1 < crypto_ablkcipher_ivsize(tfm); ct1 += 1)
520 dev_dbg(&ta_ctx->sep_used->pdev->dev,
521 "%02x\n", *(cptr + ct1));
526 * RFC2451: Weak key check
527 * Returns: 1 (weak), 0 (not weak)
529 static int sep_weak_key(const u8 *key, unsigned int keylen)
531 static const u8 parity[] = {
532 8, 1, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 2, 8,
533 0, 8, 8, 0, 8, 0, 0, 8, 8,
535 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
536 8, 0, 0, 8, 0, 8, 8, 0, 0,
538 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
539 8, 0, 0, 8, 0, 8, 8, 0, 0,
541 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
542 0, 8, 8, 0, 8, 0, 0, 8, 8,
544 0, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
545 8, 0, 0, 8, 0, 8, 8, 0, 0,
547 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
548 0, 8, 8, 0, 8, 0, 0, 8, 8,
550 8, 0, 0, 8, 0, 8, 8, 0, 0, 8, 8, 0, 8, 0, 0, 8,
551 0, 8, 8, 0, 8, 0, 0, 8, 8,
553 4, 8, 8, 0, 8, 0, 0, 8, 8, 0, 0, 8, 0, 8, 8, 0,
554 8, 5, 0, 8, 0, 8, 8, 0, 0,
560 n = parity[key[0]]; n <<= 4;
561 n |= parity[key[1]]; n <<= 4;
562 n |= parity[key[2]]; n <<= 4;
563 n |= parity[key[3]]; n <<= 4;
564 n |= parity[key[4]]; n <<= 4;
565 n |= parity[key[5]]; n <<= 4;
566 n |= parity[key[6]]; n <<= 4;
570 /* 1 in 10^10 keys passes this test */
571 if (!((n - (w >> 3)) & w)) {
572 if (n < 0x41415151) {
573 if (n < 0x31312121) {
574 if (n < 0x14141515) {
575 /* 01 01 01 01 01 01 01 01 */
578 /* 01 1F 01 1F 01 0E 01 0E */
582 /* 01 E0 01 E0 01 F1 01 F1 */
585 /* 01 FE 01 FE 01 FE 01 FE */
590 if (n < 0x34342525) {
591 /* 1F 01 1F 01 0E 01 0E 01 */
594 /* 1F 1F 1F 1F 0E 0E 0E 0E (?) */
598 /* 1F E0 1F E0 0E F1 0E F1 */
601 /* 1F FE 1F FE 0E FE 0E FE */
607 if (n < 0x61616161) {
608 if (n < 0x44445555) {
609 /* E0 01 E0 01 F1 01 F1 01 */
612 /* E0 1F E0 1F F1 0E F1 0E */
616 /* E0 E0 E0 E0 F1 F1 F1 F1 (?) */
619 /* E0 FE E0 FE F1 FE F1 FE */
624 if (n < 0x64646565) {
625 /* FE 01 FE 01 FE 01 FE 01 */
628 /* FE 1F FE 1F FE 0E FE 0E */
632 /* FE E0 FE E0 FE F1 FE F1 */
635 /* FE FE FE FE FE FE FE FE */
649 static u32 sep_sg_nents(struct scatterlist *sg)
662 * @ta_ctx: pointer to struct this_task_ctx
663 * @returns: offset to place for the next word in the message
664 * Set up pointer in message pool for new message
666 static u32 sep_start_msg(struct this_task_ctx *ta_ctx)
669 ta_ctx->msg_len_words = 2;
670 ta_ctx->msgptr = ta_ctx->msg;
671 memset(ta_ctx->msg, 0, SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
672 ta_ctx->msgptr += sizeof(u32) * 2;
673 word_ptr = (u32 *)ta_ctx->msgptr;
674 *word_ptr = SEP_START_MSG_TOKEN;
675 return sizeof(u32) * 2;
680 * @ta_ctx: pointer to struct this_task_ctx
681 * @msg_offset: current message offset
683 * End message; set length and CRC; and
684 * send interrupt to the SEP
686 static void sep_end_msg(struct this_task_ctx *ta_ctx, u32 msg_offset)
689 /* Msg size goes into msg after token */
690 ta_ctx->msg_len_words = msg_offset / sizeof(u32) + 1;
691 word_ptr = (u32 *)ta_ctx->msgptr;
693 *word_ptr = ta_ctx->msg_len_words;
695 /* CRC (currently 0) goes at end of msg */
696 word_ptr = (u32 *)(ta_ctx->msgptr + msg_offset);
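/*
 * Worked example (an assumption for exposition): a 36-byte payload
 * ends with msg_offset = 36, so msg_len_words = 36/4 + 1 = 10; the
 * extra word covers the CRC slot placed at the end of the message.
 */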
701 * sep_start_inbound_msg -
702 * @ta_ctx: pointer to struct this_task_ctx
703 * @msg_offset: offset to place for the next word in the message
704 * @returns: 0 for success; error value for failure
705 * Set up pointer in message pool for inbound message
707 static u32 sep_start_inbound_msg(struct this_task_ctx *ta_ctx, u32 *msg_offset)
713 *msg_offset = sizeof(u32) * 2;
714 word_ptr = (u32 *)ta_ctx->msgptr;
716 ta_ctx->msg_len_words = *(word_ptr + 1);
718 if (token != SEP_START_MSG_TOKEN) {
719 error = SEP_INVALID_START;
730 * @ta_ctx: pointer to struct this_task_ctx
731 * @in_addr: pointer to start of parameter
732 * @size: size of parameter to copy (in bytes)
733 * @max_size: amount to advance the offset; SEP msg is in word sizes
734 * @msg_offset: pointer to current offset (is updated)
735 * @byte_array: flag to indicate whether endianness must be changed
736 * Copies data into the message area from caller
738 static void sep_write_msg(struct this_task_ctx *ta_ctx, void *in_addr,
739 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
743 void_ptr = ta_ctx->msgptr + *msg_offset;
744 word_ptr = (u32 *)void_ptr;
745 memcpy(void_ptr, in_addr, size);
746 *msg_offset += max_size;
748 /* Do we need to manipulate endian? */
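/*
 * Note the word-granular swap below: e.g. size = 5 swaps
 * (5 + 3) / 4 = 2 whole words, i.e. all 5 payload bytes plus three
 * bytes of padding.
 */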
751 for (i = 0; i < ((size + 3) / 4); i += 1)
752 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
758 * @ta_ctx: pointer to struct this_task_ctx
759 * @msg_offset: pointer to current offset (is updated)
760 * @op_code: op code to put into message
761 * Puts op code into message and updates offset
763 static void sep_make_header(struct this_task_ctx *ta_ctx, u32 *msg_offset,
768 *msg_offset = sep_start_msg(ta_ctx);
769 word_ptr = (u32 *)(ta_ctx->msgptr + *msg_offset);
771 *msg_offset += sizeof(u32);
778 * @ta_ctx: pointer to struct this_task_ctx
779 * @in_addr: pointer to start of parameter
780 * @size: size of parameter to copy (in bytes)
781 * @max_size: amount to advance the offset; SEP msg is in word sizes
782 * @msg_offset: pointer to current offset (is updated)
783 * @byte_array: flag to indicate whether endianness must be changed
784 * Copies data out of the message area to caller
786 static void sep_read_msg(struct this_task_ctx *ta_ctx, void *in_addr,
787 u32 size, u32 max_size, u32 *msg_offset, u32 byte_array)
791 void_ptr = ta_ctx->msgptr + *msg_offset;
792 word_ptr = (u32 *)void_ptr;
794 /* Do we need to manipulate endian? */
797 for (i = 0; i < ((size + 3) / 4); i += 1)
798 *(word_ptr + i) = CHG_ENDIAN(*(word_ptr + i));
801 memcpy(in_addr, void_ptr, size);
802 *msg_offset += max_size;
807 * @ta_ctx: pointer to struct this_task_ctx
808 * @op_code: expected op_code
809 * @msg_offset: pointer to current offset (is updated)
810 * @returns: 0 for success; error for failure
812 static u32 sep_verify_op(struct this_task_ctx *ta_ctx, u32 op_code,
818 struct sep_device *sep = ta_ctx->sep_used;
820 dev_dbg(&sep->pdev->dev, "dumping return message\n");
821 error = sep_start_inbound_msg(ta_ctx, msg_offset);
823 dev_warn(&sep->pdev->dev,
824 "sep_start_inbound_msg error\n");
828 sep_read_msg(ta_ctx, in_ary, sizeof(u32) * 2, sizeof(u32) * 2,
831 if (in_ary[0] != op_code) {
832 dev_warn(&sep->pdev->dev,
833 "sep got back wrong opcode\n");
834 dev_warn(&sep->pdev->dev,
835 "got back %x; expected %x\n",
837 return SEP_WRONG_OPCODE;
840 if (in_ary[1] != SEP_OK) {
841 dev_warn(&sep->pdev->dev,
842 "sep execution error\n");
843 dev_warn(&sep->pdev->dev,
844 "got back %x; expected %x\n",
854 * @ta_ctx: pointer to struct this_task_ctx
855 * @msg_offset: point to current place in SEP msg; is updated
856 * @dst: pointer to place to put the context
857 * @len: size of the context structure (differs for crypto/hash)
858 * This function reads the context from the msg area
859 * The vendor requires the maximum length to be calculated
860 * in a specific way so that msg_offset is updated properly;
861 * it skips over some words in the msg area depending on the size
864 static void sep_read_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
867 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
868 sep_read_msg(ta_ctx, dst, len, max_length, msg_offset, 0);
872 * sep_write_context -
873 * @ta_ctx: pointer to struct this_task_ctx
874 * @msg_offset: point to current place in SEP msg; is updated
875 * @src: pointer to the current context
876 * @len: size of the context structure (differs for crypto/hash)
877 * This function writes the context to the msg area
878 * The vendor requires the maximum length to be calculated
879 * in a specific way so that msg_offset is updated properly;
880 * it skips over some words in the msg area depending on the size
883 static void sep_write_context(struct this_task_ctx *ta_ctx, u32 *msg_offset,
886 u32 max_length = ((len + 3) / sizeof(u32)) * sizeof(u32);
887 sep_write_msg(ta_ctx, src, len, max_length, msg_offset, 0);
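/*
 * The max_length above is plain round-up-to-word arithmetic: a
 * 30-byte context gives ((30 + 3) / 4) * 4 = 32, so msg_offset
 * advances past the word-aligned slot even though only 30 bytes
 * are meaningful. The same rule is used by sep_read_context() above.
 */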
892 * @ta_ctx: pointer to struct this_task_ctx
893 * Clear out crypto related values in sep device structure
894 * to enable device to be used by anyone; either kernel
895 * crypto or userspace app via middleware
897 static void sep_clear_out(struct this_task_ctx *ta_ctx)
899 if (ta_ctx->src_sg_hold) {
900 sep_free_sg_buf(ta_ctx->src_sg_hold);
901 ta_ctx->src_sg_hold = NULL;
904 if (ta_ctx->dst_sg_hold) {
905 sep_free_sg_buf(ta_ctx->dst_sg_hold);
906 ta_ctx->dst_sg_hold = NULL;
909 ta_ctx->src_sg = NULL;
910 ta_ctx->dst_sg = NULL;
912 sep_free_dma_table_data_handler(ta_ctx->sep_used, &ta_ctx->dma_ctx);
914 if (ta_ctx->i_own_sep) {
916 * The following unlocks the sep and makes it available
917 * to any other application
918 * First, null out crypto entries in sep before releasing it
920 ta_ctx->sep_used->current_hash_req = NULL;
921 ta_ctx->sep_used->current_cypher_req = NULL;
922 ta_ctx->sep_used->current_request = 0;
923 ta_ctx->sep_used->current_hash_stage = 0;
924 ta_ctx->sep_used->ta_ctx = NULL;
925 ta_ctx->sep_used->in_kernel = 0;
927 ta_ctx->call_status.status = 0;
929 /* Remove anything confidential */
930 memset(ta_ctx->sep_used->shared_addr, 0,
931 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
933 sep_queue_status_remove(ta_ctx->sep_used, &ta_ctx->queue_elem);
935 #ifdef SEP_ENABLE_RUNTIME_PM
936 ta_ctx->sep_used->in_use = 0;
937 pm_runtime_mark_last_busy(&ta_ctx->sep_used->pdev->dev);
938 pm_runtime_put_autosuspend(&ta_ctx->sep_used->pdev->dev);
941 clear_bit(SEP_WORKING_LOCK_BIT,
942 &ta_ctx->sep_used->in_use_flags);
943 ta_ctx->sep_used->pid_doing_transaction = 0;
945 dev_dbg(&ta_ctx->sep_used->pdev->dev,
946 "[PID%d] waking up next transaction\n",
949 clear_bit(SEP_TRANSACTION_STARTED_LOCK_BIT,
950 &ta_ctx->sep_used->in_use_flags);
951 wake_up(&ta_ctx->sep_used->event_transactions);
953 ta_ctx->i_own_sep = 0;
958 * Release crypto infrastructure from EINPROGRESS and
959 * clear sep_dev so that SEP is available to anyone
961 static void sep_crypto_release(struct sep_system_ctx *sctx,
962 struct this_task_ctx *ta_ctx, u32 error)
964 struct ahash_request *hash_req = ta_ctx->current_hash_req;
965 struct ablkcipher_request *cypher_req =
966 ta_ctx->current_cypher_req;
967 struct sep_device *sep = ta_ctx->sep_used;
969 sep_clear_out(ta_ctx);
972 * This may not yet exist depending on when we
973 * chose to bail out. If it does exist, set
976 if (ta_ctx->are_we_done_yet != NULL)
977 *ta_ctx->are_we_done_yet = 1;
979 if (cypher_req != NULL) {
980 if ((sctx->key_sent == 1) ||
981 ((error != 0) && (error != -EINPROGRESS))) {
982 if (cypher_req->base.complete == NULL) {
983 dev_dbg(&sep->pdev->dev,
984 "release is null for cypher!");
986 cypher_req->base.complete(
987 &cypher_req->base, error);
992 if (hash_req != NULL) {
993 if (hash_req->base.complete == NULL) {
994 dev_dbg(&sep->pdev->dev,
995 "release is null for hash!");
997 hash_req->base.complete(
998 &hash_req->base, error);
1004 * This is where we grab the sep itself and tell it to do something.
1005 * It will sleep if the sep is currently busy
1006 * and it will return 0 if the sep is now ours; an error value if there was a problem
1009 static int sep_crypto_take_sep(struct this_task_ctx *ta_ctx)
1011 struct sep_device *sep = ta_ctx->sep_used;
1013 struct sep_msgarea_hdr *my_msg_header;
1015 my_msg_header = (struct sep_msgarea_hdr *)ta_ctx->msg;
1017 /* add to status queue */
1018 ta_ctx->queue_elem = sep_queue_status_add(sep, my_msg_header->opcode,
1019 ta_ctx->nbytes, current->pid,
1020 current->comm, sizeof(current->comm));
1022 if (!ta_ctx->queue_elem) {
1023 dev_dbg(&sep->pdev->dev,
1024 "[PID%d] updating queue status error\n", current->pid);
1028 /* get the device; this can sleep */
1029 result = sep_wait_transaction(sep);
1033 if (sep_dev->power_save_setup == 1)
1034 pm_runtime_get_sync(&sep_dev->pdev->dev);
1036 /* Copy in the message */
1037 memcpy(sep->shared_addr, ta_ctx->msg,
1038 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1040 /* Copy in the dcb information if there is any */
1041 if (ta_ctx->dcb_region) {
1042 result = sep_activate_dcb_dmatables_context(sep,
1043 &ta_ctx->dcb_region, &ta_ctx->dmatables_region,
1049 /* Mark the device so we know how to finish the job in the tasklet */
1050 if (ta_ctx->current_hash_req)
1051 sep->current_hash_req = ta_ctx->current_hash_req;
1053 sep->current_cypher_req = ta_ctx->current_cypher_req;
1055 sep->current_request = ta_ctx->current_request;
1056 sep->current_hash_stage = ta_ctx->current_hash_stage;
1057 sep->ta_ctx = ta_ctx;
1059 ta_ctx->i_own_sep = 1;
1061 /* need to set bit first to avoid race condition with interrupt */
1062 set_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET, &ta_ctx->call_status.status);
1064 result = sep_send_command_handler(sep);
1066 dev_dbg(&sep->pdev->dev, "[PID%d]: sending command to the sep\n",
1070 dev_dbg(&sep->pdev->dev, "[PID%d]: command sent okay\n",
1073 dev_dbg(&sep->pdev->dev, "[PID%d]: can't send command\n",
1075 clear_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1076 &ta_ctx->call_status.status);
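/*
 * Illustrative caller pattern (an assumption for exposition; every
 * cipher and hash operation below follows this shape):
 *
 *	are_we_done_yet = 0;
 *	result = sep_crypto_take_sep(ta_ctx);
 *	if (result)
 *		sep_crypto_release(sctx, ta_ctx, result);
 *	end_time = jiffies + (WAIT_TIME * HZ);
 *	while (time_before(jiffies, end_time) && (are_we_done_yet == 0))
 *		schedule();
 *
 * Completion is signalled by the interrupt tasklet (sep_finish),
 * whose post-op handlers call sep_crypto_release(), which in turn
 * sets the are_we_done_yet flag.
 */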
1083 * This function sets things up for a crypto data block process
1084 * This does all preparation, but does not try to grab the
1086 * @req: pointer to struct ablkcipher_request
1087 * returns: 0 if all went well, non-zero on error
1089 static int sep_crypto_block_data(struct ablkcipher_request *req)
1098 static char small_buf[100];
1099 ssize_t copy_result;
1102 struct scatterlist *new_sg;
1103 struct this_task_ctx *ta_ctx;
1104 struct crypto_ablkcipher *tfm;
1105 struct sep_system_ctx *sctx;
1107 struct sep_des_internal_context *des_internal;
1108 struct sep_aes_internal_context *aes_internal;
1110 ta_ctx = ablkcipher_request_ctx(req);
1111 tfm = crypto_ablkcipher_reqtfm(req);
1112 sctx = crypto_ablkcipher_ctx(tfm);
1114 /* start the walk on scatterlists */
1115 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1116 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sep crypto block data size of %x\n",
1119 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1121 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1126 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1127 "crypto block: src is %lx dst is %lx\n",
1128 (unsigned long)req->src, (unsigned long)req->dst);
1130 /* Make sure all pages are an even multiple of the block size */
1131 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
1132 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 1);
1134 if (int_error < 0) {
1135 dev_warn(&ta_ctx->sep_used->pdev->dev, "oddball page error\n");
1137 } else if (int_error == 1) {
1138 ta_ctx->src_sg = new_sg;
1139 ta_ctx->src_sg_hold = new_sg;
1141 ta_ctx->src_sg = req->src;
1142 ta_ctx->src_sg_hold = NULL;
1145 int_error = sep_oddball_pages(ta_ctx->sep_used, req->dst,
1146 req->nbytes, ta_ctx->walk.blocksize, &new_sg, 0);
1148 if (int_error < 0) {
1149 dev_warn(&ta_ctx->sep_used->pdev->dev, "dst oddball page error %x\n",
1152 } else if (int_error == 1) {
1153 ta_ctx->dst_sg = new_sg;
1154 ta_ctx->dst_sg_hold = new_sg;
1156 ta_ctx->dst_sg = req->dst;
1157 ta_ctx->dst_sg_hold = NULL;
1160 /* set nbytes for queue status */
1161 ta_ctx->nbytes = req->nbytes;
1163 /* Key already done; this is for data */
1164 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending data\n");
1166 /* check for valid data and proper spacing */
1167 src_ptr = sg_virt(ta_ctx->src_sg);
1168 dst_ptr = sg_virt(ta_ctx->dst_sg);
1170 if (!src_ptr || !dst_ptr ||
1171 (ta_ctx->current_cypher_req->nbytes %
1172 crypto_ablkcipher_blocksize(tfm))) {
1174 dev_warn(&ta_ctx->sep_used->pdev->dev,
1175 "cipher block size odd\n");
1176 dev_warn(&ta_ctx->sep_used->pdev->dev,
1177 "cipher block size is %x\n",
1178 crypto_ablkcipher_blocksize(tfm));
1179 dev_warn(&ta_ctx->sep_used->pdev->dev,
1180 "cipher data size is %x\n",
1181 ta_ctx->current_cypher_req->nbytes);
1185 if (partial_overlap(src_ptr, dst_ptr,
1186 ta_ctx->current_cypher_req->nbytes)) {
1187 dev_warn(&ta_ctx->sep_used->pdev->dev,
1188 "block partial overlap\n");
1192 /* Put together the message */
1193 sep_make_header(ta_ctx, &msg_offset, ta_ctx->block_opcode);
1195 /* If des, and size is 1 block, put directly in msg */
1196 if ((ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) &&
1197 (req->nbytes == crypto_ablkcipher_blocksize(tfm))) {
1199 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1200 "writing out one block des\n");
1202 copy_result = sg_copy_to_buffer(
1203 ta_ctx->src_sg, sep_sg_nents(ta_ctx->src_sg),
1204 small_buf, crypto_ablkcipher_blocksize(tfm));
1206 if (copy_result != crypto_ablkcipher_blocksize(tfm)) {
1207 dev_warn(&ta_ctx->sep_used->pdev->dev,
1208 "des block copy failed\n");
1212 /* Put data into message */
1213 sep_write_msg(ta_ctx, small_buf,
1214 crypto_ablkcipher_blocksize(tfm),
1215 crypto_ablkcipher_blocksize(tfm) * 2,
1218 /* Put size into message */
1219 sep_write_msg(ta_ctx, &req->nbytes,
1220 sizeof(u32), sizeof(u32), &msg_offset, 0);
1222 /* Otherwise, fill out dma tables */
1223 ta_ctx->dcb_input_data.app_in_address = src_ptr;
1224 ta_ctx->dcb_input_data.data_in_size = req->nbytes;
1225 ta_ctx->dcb_input_data.app_out_address = dst_ptr;
1226 ta_ctx->dcb_input_data.block_size =
1227 crypto_ablkcipher_blocksize(tfm);
1228 ta_ctx->dcb_input_data.tail_block_size = 0;
1229 ta_ctx->dcb_input_data.is_applet = 0;
1230 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
1231 ta_ctx->dcb_input_data.dst_sg = ta_ctx->dst_sg;
1233 result = sep_create_dcb_dmatables_context_kernel(
1235 &ta_ctx->dcb_region,
1236 &ta_ctx->dmatables_region,
1238 &ta_ctx->dcb_input_data,
1241 dev_warn(&ta_ctx->sep_used->pdev->dev,
1242 "crypto dma table create failed\n");
1246 /* Portion of msg is nulled (no data) */
1252 sep_write_msg(ta_ctx, (void *)msg, sizeof(u32) * 5,
1253 sizeof(u32) * 5, &msg_offset, 0);
1257 * Before we write the message, we need to overwrite the
1258 * vendor's IV with the one from our own ablkcipher walk
1259 * iv because this is needed for dm-crypt
1261 sep_dump_ivs(req, "sending data block to sep\n");
1262 if ((ta_ctx->current_request == DES_CBC) &&
1263 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1265 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1266 "overwrite vendor iv on DES\n");
1267 des_internal = (struct sep_des_internal_context *)
1268 sctx->des_private_ctx.ctx_buf;
1269 memcpy((void *)des_internal->iv_context,
1270 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1271 } else if ((ta_ctx->current_request == AES_CBC) &&
1272 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1274 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1275 "overwrite vendor iv on AES\n");
1276 aes_internal = (struct sep_aes_internal_context *)
1277 sctx->aes_private_ctx.cbuff;
1278 memcpy((void *)aes_internal->aes_ctx_iv,
1279 ta_ctx->walk.iv, crypto_ablkcipher_ivsize(tfm));
1282 /* Write context into message */
1283 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1284 sep_write_context(ta_ctx, &msg_offset,
1285 &sctx->des_private_ctx,
1286 sizeof(struct sep_des_private_context));
1288 sep_write_context(ta_ctx, &msg_offset,
1289 &sctx->aes_private_ctx,
1290 sizeof(struct sep_aes_private_context));
1293 /* conclude message */
1294 sep_end_msg(ta_ctx, msg_offset);
1296 /* Parent (caller) is now ready to tell the sep to go ahead */
1302 * This function sets things up for a crypto key submit process
1303 * This does all preparation, but does not try to grab the
1305 * @req: pointer to struct ablkcipher_request
1306 * returns: 0 if all went well, non-zero on error
1308 static int sep_crypto_send_key(struct ablkcipher_request *req)
1316 struct this_task_ctx *ta_ctx;
1317 struct crypto_ablkcipher *tfm;
1318 struct sep_system_ctx *sctx;
1320 ta_ctx = ablkcipher_request_ctx(req);
1321 tfm = crypto_ablkcipher_reqtfm(req);
1322 sctx = crypto_ablkcipher_ctx(tfm);
1324 dev_dbg(&ta_ctx->sep_used->pdev->dev, "sending key\n");
1326 /* start the walk on scatterlists */
1327 ablkcipher_walk_init(&ta_ctx->walk, req->src, req->dst, req->nbytes);
1328 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1329 "sep crypto block data size of %x\n", req->nbytes);
1331 int_error = ablkcipher_walk_phys(req, &ta_ctx->walk);
1333 dev_warn(&ta_ctx->sep_used->pdev->dev, "walk phys error %x\n",
1339 if ((ta_ctx->current_request == DES_CBC) &&
1340 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1341 if (!ta_ctx->walk.iv) {
1342 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1346 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_DES_IV_SIZE_BYTES);
1349 if ((ta_ctx->current_request == AES_CBC) &&
1350 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1351 if (!ta_ctx->walk.iv) {
1352 dev_warn(&ta_ctx->sep_used->pdev->dev, "no iv found\n");
1356 memcpy(ta_ctx->iv, ta_ctx->walk.iv, SEP_AES_IV_SIZE_BYTES);
1359 /* put together message to SEP */
1360 /* Start with op code */
1361 sep_make_header(ta_ctx, &msg_offset, ta_ctx->init_opcode);
1363 /* now deal with IV */
1364 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1365 if (ta_ctx->des_opmode == SEP_DES_CBC) {
1366 sep_write_msg(ta_ctx, ta_ctx->iv,
1367 SEP_DES_IV_SIZE_BYTES, sizeof(u32) * 4,
1371 msg_offset += 4 * sizeof(u32);
1374 max_length = ((SEP_AES_IV_SIZE_BYTES + 3) /
1375 sizeof(u32)) * sizeof(u32);
1376 if (ta_ctx->aes_opmode == SEP_AES_CBC) {
1377 sep_write_msg(ta_ctx, ta_ctx->iv,
1378 SEP_AES_IV_SIZE_BYTES, max_length,
1382 msg_offset += max_length;
1387 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1388 sep_write_msg(ta_ctx, (void *)&sctx->key.des.key1,
1389 sizeof(u32) * 8, sizeof(u32) * 8,
1392 msg[0] = (u32)sctx->des_nbr_keys;
1393 msg[1] = (u32)ta_ctx->des_encmode;
1394 msg[2] = (u32)ta_ctx->des_opmode;
1396 sep_write_msg(ta_ctx, (void *)msg,
1397 sizeof(u32) * 3, sizeof(u32) * 3,
1400 sep_write_msg(ta_ctx, (void *)&sctx->key.aes,
1402 SEP_AES_MAX_KEY_SIZE_BYTES,
1405 msg[0] = (u32)sctx->aes_key_size;
1406 msg[1] = (u32)ta_ctx->aes_encmode;
1407 msg[2] = (u32)ta_ctx->aes_opmode;
1408 msg[3] = (u32)0; /* Secret key is not used */
1409 sep_write_msg(ta_ctx, (void *)msg,
1410 sizeof(u32) * 4, sizeof(u32) * 4,
1414 /* conclude message */
1415 sep_end_msg(ta_ctx, msg_offset);
1417 /* Parent (caller) is now ready to tell the sep to go ahead */
1422 /* This needs to be run as a work queue as it can be put asleep */
1423 static void sep_crypto_block(void *data)
1425 unsigned long end_time;
1429 struct ablkcipher_request *req;
1430 struct this_task_ctx *ta_ctx;
1431 struct crypto_ablkcipher *tfm;
1432 struct sep_system_ctx *sctx;
1433 int are_we_done_yet;
1435 req = (struct ablkcipher_request *)data;
1436 ta_ctx = ablkcipher_request_ctx(req);
1437 tfm = crypto_ablkcipher_reqtfm(req);
1438 sctx = crypto_ablkcipher_ctx(tfm);
1440 ta_ctx->are_we_done_yet = &are_we_done_yet;
1442 pr_debug("sep_crypto_block\n");
1443 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n",
1445 pr_debug("key_sent is %d\n", sctx->key_sent);
1447 /* do we need to send the key */
1448 if (sctx->key_sent == 0) {
1449 are_we_done_yet = 0;
1450 result = sep_crypto_send_key(req); /* prep to send key */
1452 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1453 "could not prep key %x\n", result);
1454 sep_crypto_release(sctx, ta_ctx, result);
1458 result = sep_crypto_take_sep(ta_ctx);
1460 dev_warn(&ta_ctx->sep_used->pdev->dev,
1461 "sep_crypto_take_sep for key send failed\n");
1462 sep_crypto_release(sctx, ta_ctx, result);
1466 /* now we sit and wait up to a fixed time for completion */
1467 end_time = jiffies + (WAIT_TIME * HZ);
1468 while ((time_before(jiffies, end_time)) &&
1469 (are_we_done_yet == 0))
1472 /* Done waiting; still not done yet? */
1473 if (are_we_done_yet == 0) {
1474 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1475 "Send key job never got done\n");
1476 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1480 /* Set the key sent variable so this can be skipped later */
1484 /* Key sent (or maybe not if we did not have to), now send block */
1485 are_we_done_yet = 0;
1487 result = sep_crypto_block_data(req);
1490 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1491 "could prep not send block %x\n", result);
1492 sep_crypto_release(sctx, ta_ctx, result);
1496 result = sep_crypto_take_sep(ta_ctx);
1498 dev_warn(&ta_ctx->sep_used->pdev->dev,
1499 "sep_crypto_take_sep for block send failed\n");
1500 sep_crypto_release(sctx, ta_ctx, result);
1504 /* now we sit and wait up to a fixed time for completion */
1505 end_time = jiffies + (WAIT_TIME * HZ);
1506 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
1509 /* Done waiting; still not done yet? */
1510 if (are_we_done_yet == 0) {
1511 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1512 "Send block job never got done\n");
1513 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1517 /* That's it; entire thing done, get out of queue */
1519 pr_debug("crypto_block leaving\n");
1520 pr_debug("tfm is %p sctx is %p ta_ctx is %p\n", tfm, sctx, ta_ctx);
1524 * Post operation (after interrupt) for crypto block
1526 static u32 crypto_post_op(struct sep_device *sep)
1532 ssize_t copy_result;
1533 static char small_buf[100];
1535 struct ablkcipher_request *req;
1536 struct this_task_ctx *ta_ctx;
1537 struct sep_system_ctx *sctx;
1538 struct crypto_ablkcipher *tfm;
1540 struct sep_des_internal_context *des_internal;
1541 struct sep_aes_internal_context *aes_internal;
1543 if (!sep->current_cypher_req)
1546 /* hold req since we need to submit work after clearing sep */
1547 req = sep->current_cypher_req;
1549 ta_ctx = ablkcipher_request_ctx(sep->current_cypher_req);
1550 tfm = crypto_ablkcipher_reqtfm(sep->current_cypher_req);
1551 sctx = crypto_ablkcipher_ctx(tfm);
1553 pr_debug("crypto_post op\n");
1554 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1555 sctx->key_sent, tfm, sctx, ta_ctx);
1557 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op\n");
1558 dev_dbg(&ta_ctx->sep_used->pdev->dev, "crypto post_op message dump\n");
1560 /* first bring msg from shared area to local area */
1561 memcpy(ta_ctx->msg, sep->shared_addr,
1562 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1564 /* Is this the result of performing init (sending key to SEP)? */
1565 if (sctx->key_sent == 0) {
1567 /* Did SEP do it okay */
1568 u32_error = sep_verify_op(ta_ctx, ta_ctx->init_opcode,
1571 dev_warn(&ta_ctx->sep_used->pdev->dev,
1572 "aes init error %x\n", u32_error);
1573 sep_crypto_release(sctx, ta_ctx, u32_error);
1578 if (ta_ctx->init_opcode == SEP_DES_INIT_OPCODE) {
1579 sep_read_context(ta_ctx, &msg_offset,
1580 &sctx->des_private_ctx,
1581 sizeof(struct sep_des_private_context));
1583 sep_read_context(ta_ctx, &msg_offset,
1584 &sctx->aes_private_ctx,
1585 sizeof(struct sep_aes_private_context));
1588 sep_dump_ivs(req, "after sending key to sep\n");
1590 /* key send went okay; release sep, and set are_we_done_yet */
1592 sep_crypto_release(sctx, ta_ctx, -EINPROGRESS);
1597 * This is the result of a block request
1599 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1600 "crypto_post_op block response\n");
1602 u32_error = sep_verify_op(ta_ctx, ta_ctx->block_opcode,
1606 dev_warn(&ta_ctx->sep_used->pdev->dev,
1607 "sep block error %x\n", u32_error);
1608 sep_crypto_release(sctx, ta_ctx, u32_error);
1612 if (ta_ctx->block_opcode == SEP_DES_BLOCK_OPCODE) {
1614 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1615 "post op for DES\n");
1617 /* special case for 1 block des */
1618 if (sep->current_cypher_req->nbytes ==
1619 crypto_ablkcipher_blocksize(tfm)) {
1621 sep_read_msg(ta_ctx, small_buf,
1622 crypto_ablkcipher_blocksize(tfm),
1623 crypto_ablkcipher_blocksize(tfm) * 2,
1626 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1627 "reading in block des\n");
1629 copy_result = sg_copy_from_buffer(
1631 sep_sg_nents(ta_ctx->dst_sg),
1633 crypto_ablkcipher_blocksize(tfm));
1636 crypto_ablkcipher_blocksize(tfm)) {
1638 dev_warn(&ta_ctx->sep_used->pdev->dev,
1639 "des block copy failed\n");
1640 sep_crypto_release(sctx, ta_ctx,
1647 sep_read_context(ta_ctx, &msg_offset,
1648 &sctx->des_private_ctx,
1649 sizeof(struct sep_des_private_context));
1652 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1653 "post op for AES\n");
1655 /* Skip the MAC Output */
1656 msg_offset += (sizeof(u32) * 4);
1659 sep_read_context(ta_ctx, &msg_offset,
1660 &sctx->aes_private_ctx,
1661 sizeof(struct sep_aes_private_context));
1664 /* Copy to correct sg if this block had oddball pages */
1665 if (ta_ctx->dst_sg_hold)
1666 sep_copy_sg(ta_ctx->sep_used,
1668 ta_ctx->current_cypher_req->dst,
1669 ta_ctx->current_cypher_req->nbytes);
1672 * Copy the iv's back to the walk.iv
1673 * This is required for dm_crypt
1675 sep_dump_ivs(req, "got data block from sep\n");
1676 if ((ta_ctx->current_request == DES_CBC) &&
1677 (ta_ctx->des_opmode == SEP_DES_CBC)) {
1679 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1680 "returning result iv to walk on DES\n");
1681 des_internal = (struct sep_des_internal_context *)
1682 sctx->des_private_ctx.ctx_buf;
1683 memcpy(ta_ctx->walk.iv,
1684 (void *)des_internal->iv_context,
1685 crypto_ablkcipher_ivsize(tfm));
1686 } else if ((ta_ctx->current_request == AES_CBC) &&
1687 (ta_ctx->aes_opmode == SEP_AES_CBC)) {
1689 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1690 "returning result iv to walk on AES\n");
1691 aes_internal = (struct sep_aes_internal_context *)
1692 sctx->aes_private_ctx.cbuff;
1693 memcpy(ta_ctx->walk.iv,
1694 (void *)aes_internal->aes_ctx_iv,
1695 crypto_ablkcipher_ivsize(tfm));
1698 /* finished, release everything */
1699 sep_crypto_release(sctx, ta_ctx, 0);
1701 pr_debug("crypto_post_op done\n");
1702 pr_debug("key_sent is %d tfm is %p sctx is %p ta_ctx is %p\n",
1703 sctx->key_sent, tfm, sctx, ta_ctx);
1708 static u32 hash_init_post_op(struct sep_device *sep)
1712 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1713 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1714 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1715 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1716 "hash init post op\n");
1718 /* first bring msg from shared area to local area */
1719 memcpy(ta_ctx->msg, sep->shared_addr,
1720 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1722 u32_error = sep_verify_op(ta_ctx, SEP_HASH_INIT_OPCODE,
1726 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash init error %x\n",
1728 sep_crypto_release(sctx, ta_ctx, u32_error);
1733 sep_read_context(ta_ctx, &msg_offset,
1734 &sctx->hash_private_ctx,
1735 sizeof(struct sep_hash_private_context));
1737 /* Signal to crypto infrastructure and clear out */
1738 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash init post op done\n");
1739 sep_crypto_release(sctx, ta_ctx, 0);
1743 static u32 hash_update_post_op(struct sep_device *sep)
1747 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1748 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1749 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1750 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1751 "hash update post op\n");
1753 /* first bring msg from shared area to local area */
1754 memcpy(ta_ctx->msg, sep->shared_addr,
1755 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1757 u32_error = sep_verify_op(ta_ctx, SEP_HASH_UPDATE_OPCODE,
1761 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash update error %x\n",
1763 sep_crypto_release(sctx, ta_ctx, u32_error);
1768 sep_read_context(ta_ctx, &msg_offset,
1769 &sctx->hash_private_ctx,
1770 sizeof(struct sep_hash_private_context));
1773 * Following is only for finup; if we just completed the
1774 * data portion of finup, we now need to kick off the
1775 * finish portion of finup.
1778 if (ta_ctx->sep_used->current_hash_stage == HASH_FINUP_DATA) {
1780 /* first reset stage to HASH_FINUP_FINISH */
1781 ta_ctx->sep_used->current_hash_stage = HASH_FINUP_FINISH;
1783 /* now enqueue the finish operation */
1784 spin_lock_irq(&queue_lock);
1785 u32_error = crypto_enqueue_request(&sep_queue,
1786 &ta_ctx->sep_used->current_hash_req->base);
1787 spin_unlock_irq(&queue_lock);
1789 if ((u32_error != 0) && (u32_error != -EINPROGRESS)) {
1790 dev_warn(&ta_ctx->sep_used->pdev->dev,
1791 "spe cypher post op cant queue\n");
1792 sep_crypto_release(sctx, ta_ctx, u32_error);
1796 /* schedule the data send */
1797 u32_error = sep_submit_work(ta_ctx->sep_used->workqueue,
1798 sep_dequeuer, (void *)&sep_queue);
1801 dev_warn(&ta_ctx->sep_used->pdev->dev,
1802 "cant submit work sep_crypto_block\n");
1803 sep_crypto_release(sctx, ta_ctx, -EINVAL);
1808 /* Signal to crypto infrastructure and clear out */
1809 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash update post op done\n");
1810 sep_crypto_release(sctx, ta_ctx, 0);
1814 static u32 hash_final_post_op(struct sep_device *sep)
1819 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1820 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1821 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1822 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1823 "hash final post op\n");
1825 /* first bring msg from shared area to local area */
1826 memcpy(ta_ctx->msg, sep->shared_addr,
1827 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1829 u32_error = sep_verify_op(ta_ctx, SEP_HASH_FINISH_OPCODE,
1833 dev_warn(&ta_ctx->sep_used->pdev->dev, "hash finish error %x\n",
1835 sep_crypto_release(sctx, ta_ctx, u32_error);
1839 /* Grab the result */
1840 if (ta_ctx->current_hash_req->result == NULL) {
1841 /* Oops, null buffer; error out here */
1842 dev_warn(&ta_ctx->sep_used->pdev->dev,
1843 "hash finish null buffer\n");
1844 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1848 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1849 sizeof(u32)) * sizeof(u32);
1851 sep_read_msg(ta_ctx,
1852 ta_ctx->current_hash_req->result,
1853 crypto_ahash_digestsize(tfm), max_length,
1856 /* Signal to crypto infrastructure and clear out */
1857 dev_dbg(&ta_ctx->sep_used->pdev->dev, "hash finish post op done\n");
1858 sep_crypto_release(sctx, ta_ctx, 0);
1862 static u32 hash_digest_post_op(struct sep_device *sep)
1867 struct crypto_ahash *tfm = crypto_ahash_reqtfm(sep->current_hash_req);
1868 struct sep_system_ctx *sctx = crypto_ahash_ctx(tfm);
1869 struct this_task_ctx *ta_ctx = ahash_request_ctx(sep->current_hash_req);
1870 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1871 "hash digest post op\n");
1873 /* first bring msg from shared area to local area */
1874 memcpy(ta_ctx->msg, sep->shared_addr,
1875 SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES);
1877 u32_error = sep_verify_op(ta_ctx, SEP_HASH_SINGLE_OPCODE,
1881 dev_warn(&ta_ctx->sep_used->pdev->dev,
1882 "hash digest finish error %x\n", u32_error);
1884 sep_crypto_release(sctx, ta_ctx, u32_error);
1888 /* Grab the result */
1889 if (ta_ctx->current_hash_req->result == NULL) {
1890 /* Oops, null buffer; error out here */
1891 dev_warn(&ta_ctx->sep_used->pdev->dev,
1892 "hash digest finish null buffer\n");
1893 sep_crypto_release(sctx, ta_ctx, (u32)-ENOMEM);
1897 max_length = (((SEP_HASH_RESULT_SIZE_WORDS * sizeof(u32)) + 3) /
1898 sizeof(u32)) * sizeof(u32);
1900 sep_read_msg(ta_ctx,
1901 ta_ctx->current_hash_req->result,
1902 crypto_ahash_digestsize(tfm), max_length,
1905 /* Signal to crypto infrastructure and clear out */
1906 dev_dbg(&ta_ctx->sep_used->pdev->dev,
1907 "hash digest finish post op done\n");
1909 sep_crypto_release(sctx, ta_ctx, 0);
1914 * The sep_finish function is scheduled (via tasklet) by the
1915 * interrupt service routine when the SEP sends an interrupt.
1916 * It is only called by the interrupt handler as a tasklet.
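 * It validates the GPR status registers, then dispatches on
 * sep->current_request (cipher post-op) or sep->current_hash_stage
 * (init/update/final/digest post-ops) to finish the job recorded by
 * sep_crypto_take_sep().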
1918 static void sep_finish(unsigned long data)
1920 struct sep_device *sep_dev;
1926 pr_debug("sep_finish called with null data\n");
1930 sep_dev = (struct sep_device *)data;
1931 if (sep_dev == NULL) {
1932 pr_debug("sep_finish; sep_dev is NULL\n");
1936 if (sep_dev->in_kernel == (u32)0) {
1937 dev_warn(&sep_dev->pdev->dev,
1938 "sep_finish; not in kernel operation\n");
1942 /* Did we really do a sep command prior to this? */
1943 if (0 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
1944 &sep_dev->ta_ctx->call_status.status)) {
1946 dev_warn(&sep_dev->pdev->dev, "[PID%d] sendmsg not called\n",
1951 if (sep_dev->send_ct != sep_dev->reply_ct) {
1952 dev_warn(&sep_dev->pdev->dev,
1953 "[PID%d] poll; no message came back\n",
1958 /* Check for error (In case time ran out) */
1959 if ((res != 0x0) && (res != 0x8)) {
1960 dev_warn(&sep_dev->pdev->dev,
1961 "[PID%d] poll; poll error GPR3 is %x\n",
1966 /* What kind of interrupt from sep was this? */
1967 res = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
1969 dev_dbg(&sep_dev->pdev->dev, "[PID%d] GPR2 at crypto finish is %x\n",
1972 /* Print request? */
1973 if ((res >> 30) & 0x1) {
1974 dev_dbg(&sep_dev->pdev->dev, "[PID%d] sep print req\n",
1976 dev_dbg(&sep_dev->pdev->dev, "[PID%d] contents: %s\n",
1978 (char *)(sep_dev->shared_addr +
1979 SEP_DRIVER_PRINTF_OFFSET_IN_BYTES));
1983 /* Request for daemon (not currently in POR)? */
1985 dev_dbg(&sep_dev->pdev->dev,
1986 "[PID%d] sep request; ignoring\n",
1991 /* If we got here, then we have a reply to a sep command */
1993 dev_dbg(&sep_dev->pdev->dev,
1994 "[PID%d] sep reply to command; processing request: %x\n",
1995 current->pid, sep_dev->current_request);
1997 switch (sep_dev->current_request) {
2002 res = crypto_post_op(sep_dev);
2008 switch (sep_dev->current_hash_stage) {
2010 res = hash_init_post_op(sep_dev);
2013 case HASH_FINUP_DATA:
2014 res = hash_update_post_op(sep_dev);
2016 case HASH_FINUP_FINISH:
2018 res = hash_final_post_op(sep_dev);
2021 res = hash_digest_post_op(sep_dev);
2024 pr_debug("sep - invalid stage for hash finish\n");
2028 pr_debug("sep - invalid request for finish\n");
2032 pr_debug("sep - finish returned error %x\n", res);
2035 static int sep_hash_cra_init(struct crypto_tfm *tfm)
2037 const char *alg_name = crypto_tfm_alg_name(tfm);
2039 pr_debug("sep_hash_cra_init name is %s\n", alg_name);
2041 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2042 sizeof(struct this_task_ctx));
2046 static void sep_hash_cra_exit(struct crypto_tfm *tfm)
2048 pr_debug("sep_hash_cra_exit\n");
2051 static void sep_hash_init(void *data)
2055 struct ahash_request *req;
2056 struct crypto_ahash *tfm;
2057 struct this_task_ctx *ta_ctx;
2058 struct sep_system_ctx *sctx;
2059 unsigned long end_time;
2060 int are_we_done_yet;
2062 req = (struct ahash_request *)data;
2063 tfm = crypto_ahash_reqtfm(req);
2064 sctx = crypto_ahash_ctx(tfm);
2065 ta_ctx = ahash_request_ctx(req);
2066 ta_ctx->sep_used = sep_dev;
2068 ta_ctx->are_we_done_yet = &are_we_done_yet;
2070 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2072 ta_ctx->current_hash_stage = HASH_INIT;
2073 /* opcode and mode */
2074 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_INIT_OPCODE);
2075 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2076 sizeof(u32), sizeof(u32), &msg_offset, 0);
2077 sep_end_msg(ta_ctx, msg_offset);
2079 are_we_done_yet = 0;
2080 result = sep_crypto_take_sep(ta_ctx);
2082 dev_warn(&ta_ctx->sep_used->pdev->dev,
2083 "sep_hash_init take sep failed\n");
2084 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2087 /* now we sit and wait up to a fixed time for completion */
2088 end_time = jiffies + (WAIT_TIME * HZ);
2089 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2092 /* Done waiting; still not done yet? */
2093 if (are_we_done_yet == 0) {
2094 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2095 "hash init never got done\n");
2096 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2102 static void sep_hash_update(void *data)
2107 struct sep_hash_internal_context *int_ctx;
2111 int are_we_done_yet;
2114 static char small_buf[100];
2116 struct scatterlist *new_sg;
2117 ssize_t copy_result;
2118 struct ahash_request *req;
2119 struct crypto_ahash *tfm;
2120 struct this_task_ctx *ta_ctx;
2121 struct sep_system_ctx *sctx;
2122 unsigned long end_time;
2124 req = (struct ahash_request *)data;
2125 tfm = crypto_ahash_reqtfm(req);
2126 sctx = crypto_ahash_ctx(tfm);
2127 ta_ctx = ahash_request_ctx(req);
2128 ta_ctx->sep_used = sep_dev;
2130 ta_ctx->are_we_done_yet = &are_we_done_yet;
2132 /* length for queue status */
2133 ta_ctx->nbytes = req->nbytes;
2135 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2136 "sep_hash_update\n");
2137 ta_ctx->current_hash_stage = HASH_UPDATE;
2140 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2141 tail_len = req->nbytes % block_size;
2142 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", len);
2143 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2144 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2146 /* Compute header/tail sizes */
2147 int_ctx = (struct sep_hash_internal_context *)&sctx->
2148 hash_private_ctx.internal_context;
2149 head_len = (block_size - int_ctx->prev_update_bytes) % block_size;
2150 tail_len = (req->nbytes - head_len) % block_size;
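/*
 * Worked example: block_size = 64 and prev_update_bytes = 16 give
 * head_len = (64 - 16) % 64 = 48; with req->nbytes = 200,
 * tail_len = (200 - 48) % 64 = 24, leaving 128 bytes (two whole
 * blocks) for the DMA-able middle portion.
 */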
2152 /* Make sure all pages are an even multiple of the block size */
2153 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2155 block_size, &new_sg, 1);
2157 if (int_error < 0) {
2158 dev_warn(&ta_ctx->sep_used->pdev->dev,
2159 "oddball pages error in crash update\n");
2160 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2162 } else if (int_error == 1) {
2163 ta_ctx->src_sg = new_sg;
2164 ta_ctx->src_sg_hold = new_sg;
2166 ta_ctx->src_sg = req->src;
2167 ta_ctx->src_sg_hold = NULL;
2170 src_ptr = sg_virt(ta_ctx->src_sg);
2172 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2177 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2178 ta_ctx->dcb_input_data.data_in_size =
2179 req->nbytes - (head_len + tail_len);
2180 ta_ctx->dcb_input_data.app_out_address = NULL;
2181 ta_ctx->dcb_input_data.block_size = block_size;
2182 ta_ctx->dcb_input_data.tail_block_size = 0;
2183 ta_ctx->dcb_input_data.is_applet = 0;
2184 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2185 ta_ctx->dcb_input_data.dst_sg = NULL;
2187 int_error = sep_create_dcb_dmatables_context_kernel(
2189 &ta_ctx->dcb_region,
2190 &ta_ctx->dmatables_region,
2192 &ta_ctx->dcb_input_data,
2195 dev_warn(&ta_ctx->sep_used->pdev->dev,
2196 "hash update dma table create failed\n");
2197 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2201 /* Construct message to SEP */
2202 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_UPDATE_OPCODE);
2208 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2211 /* Handle remainders */
2214 sep_write_msg(ta_ctx, &head_len, sizeof(u32),
2215 sizeof(u32), &msg_offset, 0);
2218 copy_result = sg_copy_to_buffer(
2220 sep_sg_nents(ta_ctx->src_sg),
2221 small_buf, head_len);
2223 if (copy_result != head_len) {
2224 dev_warn(&ta_ctx->sep_used->pdev->dev,
2225 "sg head copy failure in hash block\n");
2226 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2230 sep_write_msg(ta_ctx, small_buf, head_len,
2231 sizeof(u32) * 32, &msg_offset, 1);
2233 msg_offset += sizeof(u32) * 32;
2237 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2238 sizeof(u32), &msg_offset, 0);
2241 copy_result = sep_copy_offset_sg(
2244 req->nbytes - tail_len,
2245 small_buf, tail_len);
2247 if (copy_result != tail_len) {
2248 dev_warn(&ta_ctx->sep_used->pdev->dev,
2249 "sg tail copy failure in hash block\n");
2250 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2254 sep_write_msg(ta_ctx, small_buf, tail_len,
2255 sizeof(u32) * 32, &msg_offset, 1);
2257 msg_offset += sizeof(u32) * 32;
2261 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2262 sizeof(struct sep_hash_private_context));
2264 sep_end_msg(ta_ctx, msg_offset);
2265 are_we_done_yet = 0;
2266 int_error = sep_crypto_take_sep(ta_ctx);
2268 dev_warn(&ta_ctx->sep_used->pdev->dev,
2269 "sep_hash_update take sep failed\n");
2270 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2273 /* now we sit and wait up to a fixed time for completion */
2274 end_time = jiffies + (WAIT_TIME * HZ);
2275 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2278 /* Done waiting; still not done yet? */
2279 if (are_we_done_yet == 0) {
2280 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2281 "hash update never got done\n");
2282 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2288 static void sep_hash_final(void *data)
2291 struct ahash_request *req;
2292 struct crypto_ahash *tfm;
2293 struct this_task_ctx *ta_ctx;
2294 struct sep_system_ctx *sctx;
2296 unsigned long end_time;
2297 int are_we_done_yet;
2299 req = (struct ahash_request *)data;
2300 tfm = crypto_ahash_reqtfm(req);
2301 sctx = crypto_ahash_ctx(tfm);
2302 ta_ctx = ahash_request_ctx(req);
2303 ta_ctx->sep_used = sep_dev;
2305 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2306 "sep_hash_final\n");
2307 ta_ctx->current_hash_stage = HASH_FINISH;
2309 ta_ctx->are_we_done_yet = &are_we_done_yet;
2311 /* opcode and mode */
2312 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_FINISH_OPCODE);
2315 sep_write_context(ta_ctx, &msg_offset, &sctx->hash_private_ctx,
2316 sizeof(struct sep_hash_private_context));
2318 sep_end_msg(ta_ctx, msg_offset);
2319 are_we_done_yet = 0;
2320 result = sep_crypto_take_sep(ta_ctx);
2322 dev_warn(&ta_ctx->sep_used->pdev->dev,
2323 "sep_hash_final take sep failed\n");
2324 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2327 /* now we sit and wait up to a fixed time for completion */
2328 end_time = jiffies + (WAIT_TIME * HZ);
2329 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2332 /* Done waiting; still not done yet? */
2333 if (are_we_done_yet == 0) {
2334 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2335 "hash final job never got done\n");
2336 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2342 static void sep_hash_digest(void *data)
2350 int are_we_done_yet;
2352 static char small_buf[100];	/* safe: jobs are serialized by the single-threaded workqueue */
2353 struct scatterlist *new_sg;
2356 struct ahash_request *req;
2357 struct crypto_ahash *tfm;
2358 struct this_task_ctx *ta_ctx;
2359 struct sep_system_ctx *sctx;
2360 unsigned long end_time;
2362 req = (struct ahash_request *)data;
2363 tfm = crypto_ahash_reqtfm(req);
2364 sctx = crypto_ahash_ctx(tfm);
2365 ta_ctx = ahash_request_ctx(req);
2366 ta_ctx->sep_used = sep_dev;
2368 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2369 "sep_hash_digest\n");
2370 ta_ctx->current_hash_stage = HASH_DIGEST;
2372 ta_ctx->are_we_done_yet = &are_we_done_yet;
2374 /* length for queue status */
2375 ta_ctx->nbytes = req->nbytes;
2377 block_size = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2378 tail_len = req->nbytes % block_size;
2379 dev_dbg(&ta_ctx->sep_used->pdev->dev, "length is %x\n", req->nbytes);
2380 dev_dbg(&ta_ctx->sep_used->pdev->dev, "block_size is %x\n", block_size);
2381 dev_dbg(&ta_ctx->sep_used->pdev->dev, "tail len is %x\n", tail_len);
2383 /* Make sure all pages are an even block */
2384 int_error = sep_oddball_pages(ta_ctx->sep_used, req->src,
2386 block_size, &new_sg, 1);
2388 if (int_error < 0) {
2389 dev_warn(&ta_ctx->sep_used->pdev->dev,
2390 "oddball pages error in crash update\n");
2391 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2393 } else if (int_error == 1) {
2394 ta_ctx->src_sg = new_sg;
2395 ta_ctx->src_sg_hold = new_sg;
2397 ta_ctx->src_sg = req->src;
2398 ta_ctx->src_sg_hold = NULL;
2401 src_ptr = sg_virt(ta_ctx->src_sg);
2403 if ((!req->nbytes) || (!ta_ctx->src_sg)) {
2408 ta_ctx->dcb_input_data.app_in_address = src_ptr;
2409 ta_ctx->dcb_input_data.data_in_size = req->nbytes - tail_len;
2410 ta_ctx->dcb_input_data.app_out_address = NULL;
2411 ta_ctx->dcb_input_data.block_size = block_size;
2412 ta_ctx->dcb_input_data.tail_block_size = 0;
2413 ta_ctx->dcb_input_data.is_applet = 0;
2414 ta_ctx->dcb_input_data.src_sg = ta_ctx->src_sg;
2415 ta_ctx->dcb_input_data.dst_sg = NULL;
2417 int_error = sep_create_dcb_dmatables_context_kernel(
2419 &ta_ctx->dcb_region,
2420 &ta_ctx->dmatables_region,
2422 &ta_ctx->dcb_input_data,
2425 dev_warn(&ta_ctx->sep_used->pdev->dev,
2426 "hash update dma table create failed\n");
2427 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2431 /* Construct message to SEP */
2432 sep_make_header(ta_ctx, &msg_offset, SEP_HASH_SINGLE_OPCODE);
2433 sep_write_msg(ta_ctx, &ta_ctx->hash_opmode,
2434 sizeof(u32), sizeof(u32), &msg_offset, 0);
2440 sep_write_msg(ta_ctx, msg, sizeof(u32) * 3, sizeof(u32) * 3,
2444 sep_write_msg(ta_ctx, &tail_len, sizeof(u32),
2445 sizeof(u32), &msg_offset, 0);
2448 copy_result = sep_copy_offset_sg(
2451 req->nbytes - tail_len,
2452 small_buf, tail_len);
2454 if (copy_result != tail_len) {
2455 dev_warn(&ta_ctx->sep_used->pdev->dev,
2456 "sg tail copy failure in hash block\n");
2457 sep_crypto_release(sctx, ta_ctx, -ENOMEM);
2461 sep_write_msg(ta_ctx, small_buf, tail_len,
2462 sizeof(u32) * 32, &msg_offset, 1);
2464 msg_offset += sizeof(u32) * 32;
2467 sep_end_msg(ta_ctx, msg_offset);
2469 are_we_done_yet = 0;
2470 result = sep_crypto_take_sep(ta_ctx);
2472 dev_warn(&ta_ctx->sep_used->pdev->dev,
2473 "sep_hash_digest take sep failed\n");
2474 sep_crypto_release(sctx, ta_ctx, -EINVAL);
2477 /* now we sit and wait up to a fixed time for completion */
2478 end_time = jiffies + (WAIT_TIME * HZ);
2479 while ((time_before(jiffies, end_time)) && (are_we_done_yet == 0))
2482 /* Done waiting; still not done yet? */
2483 if (are_we_done_yet == 0) {
2484 dev_dbg(&ta_ctx->sep_used->pdev->dev,
2485 "hash digest job never got done\n");
2486 sep_crypto_release(sctx, ta_ctx, -EINVAL);
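/*
 * Caller-side sketch (not part of this driver; assumes the 3.x-era
 * async hash API): once these algorithms are registered, a kernel
 * user reaches the digest path above roughly as follows. my_done_cb
 * and my_completion are the caller's own.
 *
 *	struct crypto_ahash *tfm;
 *	struct ahash_request *req;
 *	struct scatterlist sg;
 *	u8 out[SHA1_DIGEST_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_ahash("sha1", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	req = ahash_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, data, len);
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   my_done_cb, &my_completion);
 *	ahash_request_set_crypt(req, &sg, out, len);
 *	err = crypto_ahash_digest(req);
 *	... wait for my_done_cb if err is -EINPROGRESS or -EBUSY ...
 *	ahash_request_free(req);
 *	crypto_free_ahash(tfm);
 */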
2493 * This is what is called by each of the APIs provided
2494 * in the kernel crypto descriptors. It is run in a process
2495 * context using the kernel workqueues, so it may sleep.
2498 static void sep_dequeuer(void *data)
2500 struct crypto_queue *this_queue;
2501 struct crypto_async_request *async_req;
2502 struct crypto_async_request *backlog;
2503 struct ablkcipher_request *cypher_req;
2504 struct ahash_request *hash_req;
2505 struct sep_system_ctx *sctx;
2506 struct crypto_ahash *hash_tfm;
2507 struct this_task_ctx *ta_ctx;
2510 this_queue = (struct crypto_queue *)data;
2512 spin_lock_irq(&queue_lock);
2513 backlog = crypto_get_backlog(this_queue);
2514 async_req = crypto_dequeue_request(this_queue);
2515 spin_unlock_irq(&queue_lock);
2518 pr_debug("sep crypto queue is empty\n");
2523 pr_debug("sep crypto backlog set\n");
2524 if (backlog->complete)
2525 backlog->complete(backlog, -EINPROGRESS);
2529 if (!async_req->tfm) {
2530 pr_debug("sep crypto queue null tfm\n");
2534 if (!async_req->tfm->__crt_alg) {
2535 pr_debug("sep crypto queue null __crt_alg\n");
2539 if (!async_req->tfm->__crt_alg->cra_type) {
2540 pr_debug("sep crypto queue null cra_type\n");
2544 /* we have stuff in the queue */
2545 if (async_req->tfm->__crt_alg->cra_type !=
2546 &crypto_ahash_type) {
2547 /* This is for a cypher */
2548 pr_debug("sep crypto queue doing cipher\n");
2549 cypher_req = container_of(async_req,
2550 struct ablkcipher_request,
2553 pr_debug("sep crypto queue null cypher_req\n");
2557 sep_crypto_block((void *)cypher_req);
2560 /* This is a hash */
2561 pr_debug("sep crypto queue doing hash\n");
2563 * This is a bit more complex than cipher; we
2564 * need to figure out what type of hash operation is requested.
2566 hash_req = ahash_request_cast(async_req);
2568 pr_debug("sep crypto queue null hash_req\n");
2572 hash_tfm = crypto_ahash_reqtfm(hash_req);
2574 pr_debug("sep crypto queue null hash_tfm\n");
2579 sctx = crypto_ahash_ctx(hash_tfm);
2581 pr_debug("sep crypto queue null sctx\n");
2585 ta_ctx = ahash_request_ctx(hash_req);
2587 if (ta_ctx->current_hash_stage == HASH_INIT) {
2588 pr_debug("sep crypto queue hash init\n");
2589 sep_hash_init((void *)hash_req);
2591 } else if (ta_ctx->current_hash_stage == HASH_UPDATE) {
2592 pr_debug("sep crypto queue hash update\n");
2593 sep_hash_update((void *)hash_req);
2595 } else if (ta_ctx->current_hash_stage == HASH_FINISH) {
2596 pr_debug("sep crypto queue hash final\n");
2597 sep_hash_final((void *)hash_req);
2599 } else if (ta_ctx->current_hash_stage == HASH_DIGEST) {
2600 pr_debug("sep crypto queue hash digest\n");
2601 sep_hash_digest((void *)hash_req);
2603 } else if (ta_ctx->current_hash_stage == HASH_FINUP_DATA) {
2604 pr_debug("sep crypto queue hash digest\n");
2605 sep_hash_update((void *)hash_req);
2607 } else if (ta_ctx->current_hash_stage == HASH_FINUP_FINISH) {
2608 pr_debug("sep crypto queue hash digest\n");
2609 sep_hash_final((void *)hash_req);
2612 pr_debug("sep crypto queue hash oops nothing\n");
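/*
 * Summary of the stage dispatch above:
 *	HASH_INIT         -> sep_hash_init
 *	HASH_UPDATE       -> sep_hash_update
 *	HASH_FINISH       -> sep_hash_final
 *	HASH_DIGEST       -> sep_hash_digest (one-shot operation)
 *	HASH_FINUP_DATA   -> sep_hash_update (data half of finup)
 *	HASH_FINUP_FINISH -> sep_hash_final (finish half of finup)
 */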
2618 static int sep_sha1_init(struct ahash_request *req)
2622 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2624 pr_debug("sep - doing sha1 init\n");
2626 /* Clear out task context */
2627 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2629 ta_ctx->sep_used = sep_dev;
2630 ta_ctx->current_request = SHA1;
2631 ta_ctx->current_hash_req = req;
2632 ta_ctx->current_cypher_req = NULL;
2633 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2634 ta_ctx->current_hash_stage = HASH_INIT;
2636 /* lock necessary so that only one entity touches the queues */
2637 spin_lock_irq(&queue_lock);
2638 error = crypto_enqueue_request(&sep_queue, &req->base);
2640 if ((error != 0) && (error != -EINPROGRESS))
2641 pr_debug(" sep - crypto enqueue failed: %x\n",
2643 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2644 sep_dequeuer, (void *)&sep_queue);
2646 pr_debug(" sep - workqueue submit failed: %x\n",
2648 spin_unlock_irq(&queue_lock);
2649 /* We return result of crypto enqueue */
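/*
 * The enqueue-and-kick sequence above repeats, with different
 * request/opmode/stage values, across all twenty hash entry points
 * (sha1/md5/sha224/sha256 x init/update/final/digest/finup). A
 * minimal sketch of a shared helper (hypothetical, not part of this
 * driver) that would fold them together:
 *
 *	static int sep_hash_enqueue(struct ahash_request *req,
 *				    u32 request, u32 opmode,
 *				    u32 stage, bool clear_ctx)
 *	{
 *		struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
 *		int error, error1;
 *
 *		if (clear_ctx)
 *			memset(ta_ctx, 0, sizeof(struct this_task_ctx));
 *		ta_ctx->sep_used = sep_dev;
 *		ta_ctx->current_request = request;
 *		ta_ctx->current_hash_req = req;
 *		ta_ctx->current_cypher_req = NULL;
 *		ta_ctx->hash_opmode = opmode;
 *		ta_ctx->current_hash_stage = stage;
 *
 *		spin_lock_irq(&queue_lock);
 *		error = crypto_enqueue_request(&sep_queue, &req->base);
 *		if ((error != 0) && (error != -EINPROGRESS))
 *			pr_debug(" sep - crypto enqueue failed: %x\n",
 *				error);
 *		error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
 *			sep_dequeuer, (void *)&sep_queue);
 *		if (error1)
 *			pr_debug(" sep - workqueue submit failed: %x\n",
 *				error1);
 *		spin_unlock_irq(&queue_lock);
 *		return error;
 *	}
 *
 * With that, sep_sha1_update() would reduce to
 *	return sep_hash_enqueue(req, SHA1, SEP_HASH_SHA1,
 *				HASH_UPDATE, false);
 */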
2653 static int sep_sha1_update(struct ahash_request *req)
2657 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2659 pr_debug("sep - doing sha1 update\n");
2661 ta_ctx->sep_used = sep_dev;
2662 ta_ctx->current_request = SHA1;
2663 ta_ctx->current_hash_req = req;
2664 ta_ctx->current_cypher_req = NULL;
2665 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2666 ta_ctx->current_hash_stage = HASH_UPDATE;
2668 /* lock necessary so that only one entity touches the queues */
2669 spin_lock_irq(&queue_lock);
2670 error = crypto_enqueue_request(&sep_queue, &req->base);
2672 if ((error != 0) && (error != -EINPROGRESS))
2673 pr_debug(" sep - crypto enqueue failed: %x\n",
2675 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2676 sep_dequeuer, (void *)&sep_queue);
2678 pr_debug(" sep - workqueue submit failed: %x\n",
2680 spin_unlock_irq(&queue_lock);
2681 /* We return result of crypto enqueue */
2685 static int sep_sha1_final(struct ahash_request *req)
2689 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2690 pr_debug("sep - doing sha1 final\n");
2692 ta_ctx->sep_used = sep_dev;
2693 ta_ctx->current_request = SHA1;
2694 ta_ctx->current_hash_req = req;
2695 ta_ctx->current_cypher_req = NULL;
2696 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2697 ta_ctx->current_hash_stage = HASH_FINISH;
2699 /* lock necessary so that only one entity touches the queues */
2700 spin_lock_irq(&queue_lock);
2701 error = crypto_enqueue_request(&sep_queue, &req->base);
2703 if ((error != 0) && (error != -EINPROGRESS))
2704 pr_debug(" sep - crypto enqueue failed: %x\n",
2706 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2707 sep_dequeuer, (void *)&sep_queue);
2709 pr_debug(" sep - workqueue submit failed: %x\n",
2711 spin_unlock_irq(&queue_lock);
2712 /* We return result of crypto enqueue */
2716 static int sep_sha1_digest(struct ahash_request *req)
2720 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2721 pr_debug("sep - doing sha1 digest\n");
2723 /* Clear out task context */
2724 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2726 ta_ctx->sep_used = sep_dev;
2727 ta_ctx->current_request = SHA1;
2728 ta_ctx->current_hash_req = req;
2729 ta_ctx->current_cypher_req = NULL;
2730 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2731 ta_ctx->current_hash_stage = HASH_DIGEST;
2733 /* lock necessary so that only one entity touches the queues */
2734 spin_lock_irq(&queue_lock);
2735 error = crypto_enqueue_request(&sep_queue, &req->base);
2737 if ((error != 0) && (error != -EINPROGRESS))
2738 pr_debug(" sep - crypto enqueue failed: %x\n",
2740 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2741 sep_dequeuer, (void *)&sep_queue);
2743 pr_debug(" sep - workqueue submit failed: %x\n",
2745 spin_unlock_irq(&queue_lock);
2746 /* We return result of crypto enqueue */
2750 static int sep_sha1_finup(struct ahash_request *req)
2754 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2755 pr_debug("sep - doing sha1 finup\n");
2757 ta_ctx->sep_used = sep_dev;
2758 ta_ctx->current_request = SHA1;
2759 ta_ctx->current_hash_req = req;
2760 ta_ctx->current_cypher_req = NULL;
2761 ta_ctx->hash_opmode = SEP_HASH_SHA1;
2762 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2764 /* lock necessary so that only one entity touches the queues */
2765 spin_lock_irq(&queue_lock);
2766 error = crypto_enqueue_request(&sep_queue, &req->base);
2768 if ((error != 0) && (error != -EINPROGRESS))
2769 pr_debug(" sep - crypto enqueue failed: %x\n",
2771 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2772 sep_dequeuer, (void *)&sep_queue);
2774 pr_debug(" sep - workqueue submit failed: %x\n",
2776 spin_unlock_irq(&queue_lock);
2777 /* We return result of crypto enqueue */
2781 static int sep_md5_init(struct ahash_request *req)
2785 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2786 pr_debug("sep - doing md5 init\n");
2788 /* Clear out task context */
2789 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2791 ta_ctx->sep_used = sep_dev;
2792 ta_ctx->current_request = MD5;
2793 ta_ctx->current_hash_req = req;
2794 ta_ctx->current_cypher_req = NULL;
2795 ta_ctx->hash_opmode = SEP_HASH_MD5;
2796 ta_ctx->current_hash_stage = HASH_INIT;
2798 /* lock necessary so that only one entity touches the queues */
2799 spin_lock_irq(&queue_lock);
2800 error = crypto_enqueue_request(&sep_queue, &req->base);
2802 if ((error != 0) && (error != -EINPROGRESS))
2803 pr_debug(" sep - crypto enqueue failed: %x\n",
2805 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2806 sep_dequeuer, (void *)&sep_queue);
2808 pr_debug(" sep - workqueue submit failed: %x\n",
2810 spin_unlock_irq(&queue_lock);
2811 /* We return result of crypto enqueue */
2815 static int sep_md5_update(struct ahash_request *req)
2819 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2820 pr_debug("sep - doing md5 update\n");
2822 ta_ctx->sep_used = sep_dev;
2823 ta_ctx->current_request = MD5;
2824 ta_ctx->current_hash_req = req;
2825 ta_ctx->current_cypher_req = NULL;
2826 ta_ctx->hash_opmode = SEP_HASH_MD5;
2827 ta_ctx->current_hash_stage = HASH_UPDATE;
2829 /* lock necessary so that only one entity touches the queues */
2830 spin_lock_irq(&queue_lock);
2831 error = crypto_enqueue_request(&sep_queue, &req->base);
2833 if ((error != 0) && (error != -EINPROGRESS))
2834 pr_debug(" sep - crypto enqueue failed: %x\n",
2836 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2837 sep_dequeuer, (void *)&sep_queue);
2839 pr_debug(" sep - workqueue submit failed: %x\n",
2841 spin_unlock_irq(&queue_lock);
2842 /* We return result of crypto enqueue */
2846 static int sep_md5_final(struct ahash_request *req)
2850 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2851 pr_debug("sep - doing md5 final\n");
2853 ta_ctx->sep_used = sep_dev;
2854 ta_ctx->current_request = MD5;
2855 ta_ctx->current_hash_req = req;
2856 ta_ctx->current_cypher_req = NULL;
2857 ta_ctx->hash_opmode = SEP_HASH_MD5;
2858 ta_ctx->current_hash_stage = HASH_FINISH;
2860 /* lock necessary so that only one entity touches the queues */
2861 spin_lock_irq(&queue_lock);
2862 error = crypto_enqueue_request(&sep_queue, &req->base);
2864 if ((error != 0) && (error != -EINPROGRESS))
2865 pr_debug(" sep - crypto enqueue failed: %x\n",
2867 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2868 sep_dequeuer, (void *)&sep_queue);
2870 pr_debug(" sep - workqueue submit failed: %x\n",
2872 spin_unlock_irq(&queue_lock);
2873 /* We return result of crypto enqueue */
2877 static int sep_md5_digest(struct ahash_request *req)
2881 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2883 pr_debug("sep - doing md5 digest\n");
2885 /* Clear out task context */
2886 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2888 ta_ctx->sep_used = sep_dev;
2889 ta_ctx->current_request = MD5;
2890 ta_ctx->current_hash_req = req;
2891 ta_ctx->current_cypher_req = NULL;
2892 ta_ctx->hash_opmode = SEP_HASH_MD5;
2893 ta_ctx->current_hash_stage = HASH_DIGEST;
2895 /* lock necessary so that only one entity touches the queues */
2896 spin_lock_irq(&queue_lock);
2897 error = crypto_enqueue_request(&sep_queue, &req->base);
2899 if ((error != 0) && (error != -EINPROGRESS))
2900 pr_debug(" sep - crypto enqueue failed: %x\n",
2902 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2903 sep_dequeuer, (void *)&sep_queue);
2905 pr_debug(" sep - workqueue submit failed: %x\n",
2907 spin_unlock_irq(&queue_lock);
2908 /* We return result of crypto enqueue */
2912 static int sep_md5_finup(struct ahash_request *req)
2916 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2918 pr_debug("sep - doing md5 finup\n");
2920 ta_ctx->sep_used = sep_dev;
2921 ta_ctx->current_request = MD5;
2922 ta_ctx->current_hash_req = req;
2923 ta_ctx->current_cypher_req = NULL;
2924 ta_ctx->hash_opmode = SEP_HASH_MD5;
2925 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
2927 /* lock necessary so that only one entity touches the queues */
2928 spin_lock_irq(&queue_lock);
2929 error = crypto_enqueue_request(&sep_queue, &req->base);
2931 if ((error != 0) && (error != -EINPROGRESS))
2932 pr_debug(" sep - crypto enqueue failed: %x\n",
2934 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2935 sep_dequeuer, (void *)&sep_queue);
2937 pr_debug(" sep - workqueue submit failed: %x\n",
2939 spin_unlock_irq(&queue_lock);
2940 /* We return result of crypto enqueue */
2944 static int sep_sha224_init(struct ahash_request *req)
2948 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2949 pr_debug("sep - doing sha224 init\n");
2951 /* Clear out task context */
2952 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
2954 ta_ctx->sep_used = sep_dev;
2955 ta_ctx->current_request = SHA224;
2956 ta_ctx->current_hash_req = req;
2957 ta_ctx->current_cypher_req = NULL;
2958 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2959 ta_ctx->current_hash_stage = HASH_INIT;
2961 /* lock necessary so that only one entity touches the queues */
2962 spin_lock_irq(&queue_lock);
2963 error = crypto_enqueue_request(&sep_queue, &req->base);
2965 if ((error != 0) && (error != -EINPROGRESS))
2966 pr_debug(" sep - crypto enqueue failed: %x\n",
2968 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
2969 sep_dequeuer, (void *)&sep_queue);
2971 pr_debug(" sep - workqueue submit failed: %x\n",
2973 spin_unlock_irq(&queue_lock);
2974 /* We return result of crypto enqueue */
2978 static int sep_sha224_update(struct ahash_request *req)
2982 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
2983 pr_debug("sep - doing sha224 update\n");
2985 ta_ctx->sep_used = sep_dev;
2986 ta_ctx->current_request = SHA224;
2987 ta_ctx->current_hash_req = req;
2988 ta_ctx->current_cypher_req = NULL;
2989 ta_ctx->hash_opmode = SEP_HASH_SHA224;
2990 ta_ctx->current_hash_stage = HASH_UPDATE;
2992 /* lock necessary so that only one entity touches the queues */
2993 spin_lock_irq(&queue_lock);
2994 error = crypto_enqueue_request(&sep_queue, &req->base);
2996 if ((error != 0) && (error != -EINPROGRESS))
2997 pr_debug(" sep - crypto enqueue failed: %x\n",
2999 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3000 sep_dequeuer, (void *)&sep_queue);
3002 pr_debug(" sep - workqueue submit failed: %x\n",
3004 spin_unlock_irq(&queue_lock);
3005 /* We return result of crypto enqueue */
3009 static int sep_sha224_final(struct ahash_request *req)
3013 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3014 pr_debug("sep - doing sha224 final\n");
3016 ta_ctx->sep_used = sep_dev;
3017 ta_ctx->current_request = SHA224;
3018 ta_ctx->current_hash_req = req;
3019 ta_ctx->current_cypher_req = NULL;
3020 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3021 ta_ctx->current_hash_stage = HASH_FINISH;
3023 /* lock necessary so that only one entity touches the queues */
3024 spin_lock_irq(&queue_lock);
3025 error = crypto_enqueue_request(&sep_queue, &req->base);
3027 if ((error != 0) && (error != -EINPROGRESS))
3028 pr_debug(" sep - crypto enqueue failed: %x\n",
3030 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3031 sep_dequeuer, (void *)&sep_queue);
3033 pr_debug(" sep - workqueue submit failed: %x\n",
3035 spin_unlock_irq(&queue_lock);
3036 /* We return result of crypto enqueue */
3040 static int sep_sha224_digest(struct ahash_request *req)
3044 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3046 pr_debug("sep - doing sha224 digest\n");
3048 /* Clear out task context */
3049 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3051 ta_ctx->sep_used = sep_dev;
3052 ta_ctx->current_request = SHA224;
3053 ta_ctx->current_hash_req = req;
3054 ta_ctx->current_cypher_req = NULL;
3055 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3056 ta_ctx->current_hash_stage = HASH_DIGEST;
3058 /* lock necessary so that only one entity touches the queues */
3059 spin_lock_irq(&queue_lock);
3060 error = crypto_enqueue_request(&sep_queue, &req->base);
3062 if ((error != 0) && (error != -EINPROGRESS))
3063 pr_debug(" sep - crypto enqueue failed: %x\n",
3065 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3066 sep_dequeuer, (void *)&sep_queue);
3068 pr_debug(" sep - workqueue submit failed: %x\n",
3070 spin_unlock_irq(&queue_lock);
3071 /* We return result of crypto enqueue */
3075 static int sep_sha224_finup(struct ahash_request *req)
3079 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3081 pr_debug("sep - doing sha224 finup\n");
3083 ta_ctx->sep_used = sep_dev;
3084 ta_ctx->current_request = SHA224;
3085 ta_ctx->current_hash_req = req;
3086 ta_ctx->current_cypher_req = NULL;
3087 ta_ctx->hash_opmode = SEP_HASH_SHA224;
3088 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3090 /* lock necessary so that only one entity touches the queues */
3091 spin_lock_irq(&queue_lock);
3092 error = crypto_enqueue_request(&sep_queue, &req->base);
3094 if ((error != 0) && (error != -EINPROGRESS))
3095 pr_debug(" sep - crypto enqueue failed: %x\n",
3097 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3098 sep_dequeuer, (void *)&sep_queue);
3100 pr_debug(" sep - workqueue submit failed: %x\n",
3102 spin_unlock_irq(&queue_lock);
3103 /* We return result of crypto enqueue */
3107 static int sep_sha256_init(struct ahash_request *req)
3111 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3112 pr_debug("sep - doing sha256 init\n");
3114 /* Clear out task context */
3115 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3117 ta_ctx->sep_used = sep_dev;
3118 ta_ctx->current_request = SHA256;
3119 ta_ctx->current_hash_req = req;
3120 ta_ctx->current_cypher_req = NULL;
3121 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3122 ta_ctx->current_hash_stage = HASH_INIT;
3124 /* lock necessary so that only one entity touches the queues */
3125 spin_lock_irq(&queue_lock);
3126 error = crypto_enqueue_request(&sep_queue, &req->base);
3128 if ((error != 0) && (error != -EINPROGRESS))
3129 pr_debug(" sep - crypto enqueue failed: %x\n",
3131 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3132 sep_dequeuer, (void *)&sep_queue);
3134 pr_debug(" sep - workqueue submit failed: %x\n",
3136 spin_unlock_irq(&queue_lock);
3137 /* We return result of crypto enqueue */
3141 static int sep_sha256_update(struct ahash_request *req)
3145 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3146 pr_debug("sep - doing sha256 update\n");
3148 ta_ctx->sep_used = sep_dev;
3149 ta_ctx->current_request = SHA256;
3150 ta_ctx->current_hash_req = req;
3151 ta_ctx->current_cypher_req = NULL;
3152 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3153 ta_ctx->current_hash_stage = HASH_UPDATE;
3155 /* lock necessary so that only one entity touches the queues */
3156 spin_lock_irq(&queue_lock);
3157 error = crypto_enqueue_request(&sep_queue, &req->base);
3159 if ((error != 0) && (error != -EINPROGRESS))
3160 pr_debug(" sep - crypto enqueue failed: %x\n",
3162 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3163 sep_dequeuer, (void *)&sep_queue);
3165 pr_debug(" sep - workqueue submit failed: %x\n",
3167 spin_unlock_irq(&queue_lock);
3168 /* We return result of crypto enqueue */
3172 static int sep_sha256_final(struct ahash_request *req)
3176 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3177 pr_debug("sep - doing sha256 final\n");
3179 ta_ctx->sep_used = sep_dev;
3180 ta_ctx->current_request = SHA256;
3181 ta_ctx->current_hash_req = req;
3182 ta_ctx->current_cypher_req = NULL;
3183 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3184 ta_ctx->current_hash_stage = HASH_FINISH;
3186 /* lock necessary so that only one entity touches the queues */
3187 spin_lock_irq(&queue_lock);
3188 error = crypto_enqueue_request(&sep_queue, &req->base);
3190 if ((error != 0) && (error != -EINPROGRESS))
3191 pr_debug(" sep - crypto enqueue failed: %x\n",
3193 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3194 sep_dequeuer, (void *)&sep_queue);
3196 pr_debug(" sep - workqueue submit failed: %x\n",
3198 spin_unlock_irq(&queue_lock);
3199 /* We return result of crypto enqueue */
3203 static int sep_sha256_digest(struct ahash_request *req)
3207 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3209 pr_debug("sep - doing sha256 digest\n");
3211 /* Clear out task context */
3212 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3214 ta_ctx->sep_used = sep_dev;
3215 ta_ctx->current_request = SHA256;
3216 ta_ctx->current_hash_req = req;
3217 ta_ctx->current_cypher_req = NULL;
3218 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3219 ta_ctx->current_hash_stage = HASH_DIGEST;
3221 /* lock necessary so that only one entity touches the queues */
3222 spin_lock_irq(&queue_lock);
3223 error = crypto_enqueue_request(&sep_queue, &req->base);
3225 if ((error != 0) && (error != -EINPROGRESS))
3226 pr_debug(" sep - crypto enqueue failed: %x\n",
3228 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3229 sep_dequeuer, (void *)&sep_queue);
3231 pr_debug(" sep - workqueue submit failed: %x\n",
3233 spin_unlock_irq(&queue_lock);
3234 /* We return result of crypto enqueue */
3238 static int sep_sha256_finup(struct ahash_request *req)
3242 struct this_task_ctx *ta_ctx = ahash_request_ctx(req);
3244 pr_debug("sep - doing sha256 finup\n");
3246 ta_ctx->sep_used = sep_dev;
3247 ta_ctx->current_request = SHA256;
3248 ta_ctx->current_hash_req = req;
3249 ta_ctx->current_cypher_req = NULL;
3250 ta_ctx->hash_opmode = SEP_HASH_SHA256;
3251 ta_ctx->current_hash_stage = HASH_FINUP_DATA;
3253 /* lock necessary so that only one entity touches the queues */
3254 spin_lock_irq(&queue_lock);
3255 error = crypto_enqueue_request(&sep_queue, &req->base);
3257 if ((error != 0) && (error != -EINPROGRESS))
3258 pr_debug(" sep - crypto enqueue failed: %x\n",
3260 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3261 sep_dequeuer, (void *)&sep_queue);
3263 pr_debug(" sep - workqueue submit failed: %x\n",
3265 spin_unlock_irq(&queue_lock);
3266 /* We return result of crypto enqueue */
3270 static int sep_crypto_init(struct crypto_tfm *tfm)
3272 const char *alg_name = crypto_tfm_alg_name(tfm);
3274 if (alg_name == NULL)
3275 pr_debug("sep_crypto_init alg is NULL\n");
3276 else
3277 pr_debug("sep_crypto_init alg is %s\n", alg_name);
3279 tfm->crt_ablkcipher.reqsize = sizeof(struct this_task_ctx);
3283 static void sep_crypto_exit(struct crypto_tfm *tfm)
3285 pr_debug("sep_crypto_exit\n");
3288 static int sep_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3289 unsigned int keylen)
3291 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3293 pr_debug("sep aes setkey\n");
3295 pr_debug("tfm is %p sctx is %p\n", tfm, sctx);
3297 case SEP_AES_KEY_128_SIZE:
3298 sctx->aes_key_size = AES_128;
3300 case SEP_AES_KEY_192_SIZE:
3301 sctx->aes_key_size = AES_192;
3303 case SEP_AES_KEY_256_SIZE:
3304 sctx->aes_key_size = AES_256;
3306 case SEP_AES_KEY_512_SIZE:
3307 sctx->aes_key_size = AES_512;
3310 pr_debug("invalid sep aes key size %x\n",
3315 memset(&sctx->key.aes, 0, sizeof(u32) *
3316 SEP_AES_MAX_KEY_SIZE_WORDS);
3317 memcpy(&sctx->key.aes, key, keylen);
3318 sctx->keylen = keylen;
3319 /* Indicate to encrypt/decrypt function to send key to SEP */
3325 static int sep_aes_ecb_encrypt(struct ablkcipher_request *req)
3329 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3331 pr_debug("sep - doing aes ecb encrypt\n");
3333 /* Clear out task context */
3334 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3336 ta_ctx->sep_used = sep_dev;
3337 ta_ctx->current_request = AES_ECB;
3338 ta_ctx->current_hash_req = NULL;
3339 ta_ctx->current_cypher_req = req;
3340 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3341 ta_ctx->aes_opmode = SEP_AES_ECB;
3342 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3343 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3345 /* lock necessary so that only one entity touches the queues */
3346 spin_lock_irq(&queue_lock);
3347 error = crypto_enqueue_request(&sep_queue, &req->base);
3349 if ((error != 0) && (error != -EINPROGRESS))
3350 pr_debug(" sep - crypto enqueue failed: %x\n",
3352 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3353 sep_dequeuer, (void *)&sep_queue);
3355 pr_debug(" sep - workqueue submit failed: %x\n",
3357 spin_unlock_irq(&queue_lock);
3358 /* We return result of crypto enqueue */
3362 static int sep_aes_ecb_decrypt(struct ablkcipher_request *req)
3366 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3368 pr_debug("sep - doing aes ecb decrypt\n");
3370 /* Clear out task context */
3371 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3373 ta_ctx->sep_used = sep_dev;
3374 ta_ctx->current_request = AES_ECB;
3375 ta_ctx->current_hash_req = NULL;
3376 ta_ctx->current_cypher_req = req;
3377 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3378 ta_ctx->aes_opmode = SEP_AES_ECB;
3379 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3380 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3382 /* lock necessary so that only one entity touches the queues */
3383 spin_lock_irq(&queue_lock);
3384 error = crypto_enqueue_request(&sep_queue, &req->base);
3386 if ((error != 0) && (error != -EINPROGRESS))
3387 pr_debug(" sep - crypto enqueue failed: %x\n",
3389 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3390 sep_dequeuer, (void *)&sep_queue);
3392 pr_debug(" sep - workqueue submit failed: %x\n",
3394 spin_unlock_irq(&queue_lock);
3395 /* We return result of crypto enqueue */
3399 static int sep_aes_cbc_encrypt(struct ablkcipher_request *req)
3403 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3404 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3405 crypto_ablkcipher_reqtfm(req));
3407 pr_debug("sep - doing aes cbc encrypt\n");
3409 /* Clear out task context */
3410 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3412 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3413 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3415 ta_ctx->sep_used = sep_dev;
3416 ta_ctx->current_request = AES_CBC;
3417 ta_ctx->current_hash_req = NULL;
3418 ta_ctx->current_cypher_req = req;
3419 ta_ctx->aes_encmode = SEP_AES_ENCRYPT;
3420 ta_ctx->aes_opmode = SEP_AES_CBC;
3421 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3422 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3424 /* lock necessary so that only one entity touches the queues */
3425 spin_lock_irq(&queue_lock);
3426 error = crypto_enqueue_request(&sep_queue, &req->base);
3428 if ((error != 0) && (error != -EINPROGRESS))
3429 pr_debug(" sep - crypto enqueue failed: %x\n",
3431 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3432 sep_dequeuer, (void *)&sep_queue);
3434 pr_debug(" sep - workqueue submit failed: %x\n",
3436 spin_unlock_irq(&queue_lock);
3437 /* We return result of crypto enqueue */
3441 static int sep_aes_cbc_decrypt(struct ablkcipher_request *req)
3445 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3446 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(
3447 crypto_ablkcipher_reqtfm(req));
3449 pr_debug("sep - doing aes cbc decrypt\n");
3451 pr_debug("tfm is %p sctx is %p and ta_ctx is %p\n",
3452 crypto_ablkcipher_reqtfm(req), sctx, ta_ctx);
3454 /* Clear out task context */
3455 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3457 ta_ctx->sep_used = sep_dev;
3458 ta_ctx->current_request = AES_CBC;
3459 ta_ctx->current_hash_req = NULL;
3460 ta_ctx->current_cypher_req = req;
3461 ta_ctx->aes_encmode = SEP_AES_DECRYPT;
3462 ta_ctx->aes_opmode = SEP_AES_CBC;
3463 ta_ctx->init_opcode = SEP_AES_INIT_OPCODE;
3464 ta_ctx->block_opcode = SEP_AES_BLOCK_OPCODE;
3466 /* lock necessary so that only one entity touches the queues */
3467 spin_lock_irq(&queue_lock);
3468 error = crypto_enqueue_request(&sep_queue, &req->base);
3470 if ((error != 0) && (error != -EINPROGRESS))
3471 pr_debug(" sep - crypto enqueue failed: %x\n",
3473 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3474 sep_dequeuer, (void *)&sep_queue);
3476 pr_debug(" sep - workqueue submit failed: %x\n",
3478 spin_unlock_irq(&queue_lock);
3479 /* We return result of crypto enqueue */
3483 static int sep_des_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
3484 unsigned int keylen)
3486 struct sep_system_ctx *sctx = crypto_ablkcipher_ctx(tfm);
3487 struct crypto_tfm *ctfm = crypto_ablkcipher_tfm(tfm);
3488 u32 *flags = &ctfm->crt_flags;
3490 pr_debug("sep des setkey\n");
3494 sctx->des_nbr_keys = DES_KEY_1;
3496 case DES_KEY_SIZE * 2:
3497 sctx->des_nbr_keys = DES_KEY_2;
3499 case DES_KEY_SIZE * 3:
3500 sctx->des_nbr_keys = DES_KEY_3;
3503 pr_debug("invalid key size %x\n",
3508 if ((*flags & CRYPTO_TFM_REQ_WEAK_KEY) &&
3509 (sep_weak_key(key, keylen))) {
3511 *flags |= CRYPTO_TFM_RES_WEAK_KEY;
3512 pr_debug("weak key\n");
3516 memset(&sctx->key.des, 0, sizeof(struct sep_des_key));
3517 memcpy(&sctx->key.des.key1, key, keylen);
3518 sctx->keylen = keylen;
3519 /* Indicate to encrypt/decrypt function to send key to SEP */
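/*
 * Note on the weak-key path above: sep_weak_key() is consulted only
 * when the caller opted in. Caller-side sketch (hypothetical):
 *
 *	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
 *	err = crypto_ablkcipher_setkey(tfm, key, DES_KEY_SIZE);
 *
 * A weak DES key then fails the setkey and leaves
 * CRYPTO_TFM_RES_WEAK_KEY set in the tfm flags.
 */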
3525 static int sep_des_ecb_encrypt(struct ablkcipher_request *req)
3529 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3531 pr_debug("sep - doing des ecb encrypt\n");
3533 /* Clear out task context */
3534 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3536 ta_ctx->sep_used = sep_dev;
3537 ta_ctx->current_request = DES_ECB;
3538 ta_ctx->current_hash_req = NULL;
3539 ta_ctx->current_cypher_req = req;
3540 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3541 ta_ctx->des_opmode = SEP_DES_ECB;
3542 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3543 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3545 /* lock necessary so that only one entity touches the queues */
3546 spin_lock_irq(&queue_lock);
3547 error = crypto_enqueue_request(&sep_queue, &req->base);
3549 if ((error != 0) && (error != -EINPROGRESS))
3550 pr_debug(" sep - crypto enqueue failed: %x\n",
3552 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3553 sep_dequeuer, (void *)&sep_queue);
3555 pr_debug(" sep - workqueue submit failed: %x\n",
3557 spin_unlock_irq(&queue_lock);
3558 /* We return result of crypto enqueue */
3562 static int sep_des_ecb_decrypt(struct ablkcipher_request *req)
3566 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3568 pr_debug("sep - doing des ecb decrypt\n");
3570 /* Clear out task context */
3571 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3573 ta_ctx->sep_used = sep_dev;
3574 ta_ctx->current_request = DES_ECB;
3575 ta_ctx->current_hash_req = NULL;
3576 ta_ctx->current_cypher_req = req;
3577 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3578 ta_ctx->des_opmode = SEP_DES_ECB;
3579 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3580 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3582 /* lock necessary so that only one entity touches the queues */
3583 spin_lock_irq(&queue_lock);
3584 error = crypto_enqueue_request(&sep_queue, &req->base);
3586 if ((error != 0) && (error != -EINPROGRESS))
3587 pr_debug(" sep - crypto enqueue failed: %x\n",
3589 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3590 sep_dequeuer, (void *)&sep_queue);
3592 pr_debug(" sep - workqueue submit failed: %x\n",
3594 spin_unlock_irq(&queue_lock);
3595 /* We return result of crypto enqueue */
3599 static int sep_des_cbc_encrypt(struct ablkcipher_request *req)
3603 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3605 pr_debug("sep - doing des cbc encrypt\n");
3607 /* Clear out task context */
3608 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3610 ta_ctx->sep_used = sep_dev;
3611 ta_ctx->current_request = DES_CBC;
3612 ta_ctx->current_hash_req = NULL;
3613 ta_ctx->current_cypher_req = req;
3614 ta_ctx->des_encmode = SEP_DES_ENCRYPT;
3615 ta_ctx->des_opmode = SEP_DES_CBC;
3616 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3617 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3619 /* lock necessary so that only one entity touches the queues */
3620 spin_lock_irq(&queue_lock);
3621 error = crypto_enqueue_request(&sep_queue, &req->base);
3623 if ((error != 0) && (error != -EINPROGRESS))
3624 pr_debug(" sep - crypto enqueue failed: %x\n",
3626 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3627 sep_dequeuer, (void *)&sep_queue);
3629 pr_debug(" sep - workqueue submit failed: %x\n",
3631 spin_unlock_irq(&queue_lock);
3632 /* We return result of crypto enqueue */
3636 static int sep_des_cbc_decrypt(struct ablkcipher_request *req)
3640 struct this_task_ctx *ta_ctx = ablkcipher_request_ctx(req);
3642 pr_debug("sep - doing des ecb decrypt\n");
3644 /* Clear out task context */
3645 memset(ta_ctx, 0, sizeof(struct this_task_ctx));
3647 ta_ctx->sep_used = sep_dev;
3648 ta_ctx->current_request = DES_CBC;
3649 ta_ctx->current_hash_req = NULL;
3650 ta_ctx->current_cypher_req = req;
3651 ta_ctx->des_encmode = SEP_DES_DECRYPT;
3652 ta_ctx->des_opmode = SEP_DES_CBC;
3653 ta_ctx->init_opcode = SEP_DES_INIT_OPCODE;
3654 ta_ctx->block_opcode = SEP_DES_BLOCK_OPCODE;
3656 /* lock necessary so that only one entity touches the queues */
3657 spin_lock_irq(&queue_lock);
3658 error = crypto_enqueue_request(&sep_queue, &req->base);
3660 if ((error != 0) && (error != -EINPROGRESS))
3661 pr_debug(" sep - crypto enqueue failed: %x\n",
3663 error1 = sep_submit_work(ta_ctx->sep_used->workqueue,
3664 sep_dequeuer, (void *)&sep_queue);
3666 pr_debug(" sep - workqueue submit failed: %x\n",
3668 spin_unlock_irq(&queue_lock);
3669 /* We return result of crypto enqueue */
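/*
 * Caller-side sketch for the cipher entry points above (hypothetical,
 * standard 3.x-era ablkcipher API; not part of this driver):
 *
 *	struct crypto_ablkcipher *tfm;
 *	struct ablkcipher_request *req;
 *	struct scatterlist sg_in, sg_out;
 *	u8 iv[AES_BLOCK_SIZE];
 *	int err;
 *
 *	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_ablkcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg_in, src, nbytes);
 *	sg_init_one(&sg_out, dst, nbytes);
 *	ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *					my_done_cb, &my_completion);
 *	ablkcipher_request_set_crypt(req, &sg_in, &sg_out, nbytes, iv);
 *	err = crypto_ablkcipher_encrypt(req);
 *	... wait for my_done_cb if err is -EINPROGRESS or -EBUSY ...
 */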
3673 static struct ahash_alg hash_algs[] = {
3675 .init = sep_sha1_init,
3676 .update = sep_sha1_update,
3677 .final = sep_sha1_final,
3678 .digest = sep_sha1_digest,
3679 .finup = sep_sha1_finup,
3681 .digestsize = SHA1_DIGEST_SIZE,
3684 .cra_driver_name = "sha1-sep",
3685 .cra_priority = 100,
3686 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3688 .cra_blocksize = SHA1_BLOCK_SIZE,
3689 .cra_ctxsize = sizeof(struct sep_system_ctx),
3691 .cra_module = THIS_MODULE,
3692 .cra_init = sep_hash_cra_init,
3693 .cra_exit = sep_hash_cra_exit,
3698 .init = sep_md5_init,
3699 .update = sep_md5_update,
3700 .final = sep_md5_final,
3701 .digest = sep_md5_digest,
3702 .finup = sep_md5_finup,
3704 .digestsize = MD5_DIGEST_SIZE,
3707 .cra_driver_name = "md5-sep",
3708 .cra_priority = 100,
3709 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3711 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
3712 .cra_ctxsize = sizeof(struct sep_system_ctx),
3714 .cra_module = THIS_MODULE,
3715 .cra_init = sep_hash_cra_init,
3716 .cra_exit = sep_hash_cra_exit,
3721 .init = sep_sha224_init,
3722 .update = sep_sha224_update,
3723 .final = sep_sha224_final,
3724 .digest = sep_sha224_digest,
3725 .finup = sep_sha224_finup,
3727 .digestsize = SHA224_DIGEST_SIZE,
3729 .cra_name = "sha224",
3730 .cra_driver_name = "sha224-sep",
3731 .cra_priority = 100,
3732 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3734 .cra_blocksize = SHA224_BLOCK_SIZE,
3735 .cra_ctxsize = sizeof(struct sep_system_ctx),
3737 .cra_module = THIS_MODULE,
3738 .cra_init = sep_hash_cra_init,
3739 .cra_exit = sep_hash_cra_exit,
3744 .init = sep_sha256_init,
3745 .update = sep_sha256_update,
3746 .final = sep_sha256_final,
3747 .digest = sep_sha256_digest,
3748 .finup = sep_sha256_finup,
3750 .digestsize = SHA256_DIGEST_SIZE,
3752 .cra_name = "sha256",
3753 .cra_driver_name = "sha256-sep",
3754 .cra_priority = 100,
3755 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
3757 .cra_blocksize = SHA256_BLOCK_SIZE,
3758 .cra_ctxsize = sizeof(struct sep_system_ctx),
3760 .cra_module = THIS_MODULE,
3761 .cra_init = sep_hash_cra_init,
3762 .cra_exit = sep_hash_cra_exit,
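/*
 * cra_priority 100 in the entries above decides which implementation
 * the crypto API selects when several register the same cra_name;
 * the highest-priority registration wins, so whether a request for
 * plain "sha1" lands on "sha1-sep" depends on what else is loaded.
 */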
3768 static struct crypto_alg crypto_algs[] = {
3770 .cra_name = "ecb(aes)",
3771 .cra_driver_name = "ecb-aes-sep",
3772 .cra_priority = 100,
3773 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3774 .cra_blocksize = AES_BLOCK_SIZE,
3775 .cra_ctxsize = sizeof(struct sep_system_ctx),
3777 .cra_type = &crypto_ablkcipher_type,
3778 .cra_module = THIS_MODULE,
3779 .cra_init = sep_crypto_init,
3780 .cra_exit = sep_crypto_exit,
3781 .cra_u.ablkcipher = {
3782 .min_keysize = AES_MIN_KEY_SIZE,
3783 .max_keysize = AES_MAX_KEY_SIZE,
3784 .setkey = sep_aes_setkey,
3785 .encrypt = sep_aes_ecb_encrypt,
3786 .decrypt = sep_aes_ecb_decrypt,
3790 .cra_name = "cbc(aes)",
3791 .cra_driver_name = "cbc-aes-sep",
3792 .cra_priority = 100,
3793 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3794 .cra_blocksize = AES_BLOCK_SIZE,
3795 .cra_ctxsize = sizeof(struct sep_system_ctx),
3797 .cra_type = &crypto_ablkcipher_type,
3798 .cra_module = THIS_MODULE,
3799 .cra_init = sep_crypto_init,
3800 .cra_exit = sep_crypto_exit,
3801 .cra_u.ablkcipher = {
3802 .min_keysize = AES_MIN_KEY_SIZE,
3803 .max_keysize = AES_MAX_KEY_SIZE,
3804 .setkey = sep_aes_setkey,
3805 .encrypt = sep_aes_cbc_encrypt,
3806 .ivsize = AES_BLOCK_SIZE,
3807 .decrypt = sep_aes_cbc_decrypt,
3811 .cra_name = "ebc(des)",
3812 .cra_driver_name = "ebc-des-sep",
3813 .cra_priority = 100,
3814 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3815 .cra_blocksize = DES_BLOCK_SIZE,
3816 .cra_ctxsize = sizeof(struct sep_system_ctx),
3818 .cra_type = &crypto_ablkcipher_type,
3819 .cra_module = THIS_MODULE,
3820 .cra_init = sep_crypto_init,
3821 .cra_exit = sep_crypto_exit,
3822 .cra_u.ablkcipher = {
3823 .min_keysize = DES_KEY_SIZE,
3824 .max_keysize = DES_KEY_SIZE,
3825 .setkey = sep_des_setkey,
3826 .encrypt = sep_des_ecb_encrypt,
3827 .decrypt = sep_des_ecb_decrypt,
3831 .cra_name = "cbc(des)",
3832 .cra_driver_name = "cbc-des-sep",
3833 .cra_priority = 100,
3834 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3835 .cra_blocksize = DES_BLOCK_SIZE,
3836 .cra_ctxsize = sizeof(struct sep_system_ctx),
3838 .cra_type = &crypto_ablkcipher_type,
3839 .cra_module = THIS_MODULE,
3840 .cra_init = sep_crypto_init,
3841 .cra_exit = sep_crypto_exit,
3842 .cra_u.ablkcipher = {
3843 .min_keysize = DES_KEY_SIZE,
3844 .max_keysize = DES_KEY_SIZE,
3845 .setkey = sep_des_setkey,
3846 .encrypt = sep_des_cbc_encrypt,
3847 .ivsize = DES_BLOCK_SIZE,
3848 .decrypt = sep_des_cbc_decrypt,
3852 .cra_name = "ebc(des3-ede)",
3853 .cra_driver_name = "ebc-des3-ede-sep",
3854 .cra_priority = 100,
3855 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3856 .cra_blocksize = DES_BLOCK_SIZE,
3857 .cra_ctxsize = sizeof(struct sep_system_ctx),
3859 .cra_type = &crypto_ablkcipher_type,
3860 .cra_module = THIS_MODULE,
3861 .cra_init = sep_crypto_init,
3862 .cra_exit = sep_crypto_exit,
3863 .cra_u.ablkcipher = {
3864 .min_keysize = DES3_EDE_KEY_SIZE,
3865 .max_keysize = DES3_EDE_KEY_SIZE,
3866 .setkey = sep_des_setkey,
3867 .encrypt = sep_des_ecb_encrypt,
3868 .decrypt = sep_des_ecb_decrypt,
3872 .cra_name = "cbc(des3-ede)",
3873 .cra_driver_name = "cbc-des3--ede-sep",
3874 .cra_priority = 100,
3875 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
3876 .cra_blocksize = DES_BLOCK_SIZE,
3877 .cra_ctxsize = sizeof(struct sep_system_ctx),
3879 .cra_type = &crypto_ablkcipher_type,
3880 .cra_module = THIS_MODULE,
3881 .cra_init = sep_crypto_init,
3882 .cra_exit = sep_crypto_exit,
3883 .cra_u.ablkcipher = {
3884 .min_keysize = DES3_EDE_KEY_SIZE,
3885 .max_keysize = DES3_EDE_KEY_SIZE,
3886 .setkey = sep_des_setkey,
3887 .encrypt = sep_des_cbc_encrypt,
3888 .decrypt = sep_des_cbc_decrypt,
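/*
 * When CONFIG_CRYPTO_MANAGER is enabled, registering these algorithms
 * in sep_crypto_setup() below also triggers the kernel's testmgr
 * self-tests against each cra_name before the algorithm becomes
 * available to users.
 */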
3893 int sep_crypto_setup(void)
3896 tasklet_init(&sep_dev->finish_tasklet, sep_finish,
3897 (unsigned long)sep_dev);
3899 crypto_init_queue(&sep_queue, SEP_QUEUE_LENGTH);
3901 sep_dev->workqueue = create_singlethread_workqueue(
3902 "sep_crypto_workqueue");
3903 if (!sep_dev->workqueue) {
3904 dev_warn(&sep_dev->pdev->dev, "cant create workqueue\n");
3908 spin_lock_init(&queue_lock);
3911 for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
3912 err = crypto_register_ahash(&hash_algs[i]);
3918 for (j = 0; j < ARRAY_SIZE(crypto_algs); j++) {
3919 err = crypto_register_alg(&crypto_algs[j]);
3921 goto err_crypto_algs;
3927 for (k = 0; k < i; k++)
3928 crypto_unregister_ahash(&hash_algs[k]);
3929 destroy_workqueue(sep_dev->workqueue);
3933 for (k = 0; k < j; k++)
3934 crypto_unregister_alg(&crypto_algs[k]);
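/*
 * The two unwind paths above unregister only what was actually
 * registered: the first i hash algorithms or the first j cipher
 * algorithms, in registration order, before the workqueue is
 * destroyed.
 */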
3938 void sep_crypto_takedown(void)
3943 for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
3944 crypto_unregister_ahash(&hash_algs[i]);
3945 for (i = 0; i < ARRAY_SIZE(crypto_algs); i++)
3946 crypto_unregister_alg(&crypto_algs[i]);
3948 destroy_workqueue(sep_dev->workqueue);
3949 tasklet_kill(&sep_dev->finish_tasklet);
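/*
 * sep_crypto_setup() and sep_crypto_takedown() are exported to the
 * rest of the driver; presumably the main sep driver's probe and
 * remove paths call them once the device is up, though that wiring
 * lives outside this file.
 */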